Update oslo code

Just update the copied oslo-incubator code to the latest upstream version.

Change-Id: I51519dadef383ea73b462760fdf4de62678596b9
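Most of the changes below apply the same Python 3 compatibility pattern brought in by this sync: Python-2-only idioms (dict.iteritems()/iterkeys(), basestring, xrange, urllib/urlparse helpers) are replaced with six and the rally.openstack.common.py3kcompat.urlutils wrapper. A minimal illustrative sketch of that pattern follows; it is not code from this commit, and six.moves.urllib.parse stands in here for the py3kcompat wrapper:

    import six
    from six.moves.urllib.parse import urlencode

    params = {'marker': 'abc', 'limit': 2}

    # dict.iteritems() exists only on Python 2; six.iteritems() works on both.
    for key, value in six.iteritems(params):
        print('%s=%s' % (key, value))

    # urllib.urlencode() moved to urllib.parse.urlencode() on Python 3.
    print('?%s' % urlencode(params))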
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2013 OpenStack Foundation
 # All Rights Reserved.
 #
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2013 OpenStack Foundation
 # Copyright 2013 Spanish National Research Council.
 # All Rights Reserved.
@@ -23,8 +21,8 @@ import abc
 import argparse
 import logging
 import os
-import six
 
+import six
 from stevedore import extension
 
 from rally.openstack.common.apiclient import exceptions
@@ -60,7 +58,7 @@ def load_auth_system_opts(parser):
     """
     group = parser.add_argument_group("Common auth options")
     BaseAuthPlugin.add_common_opts(group)
-    for name, auth_plugin in _discovered_plugins.iteritems():
+    for name, auth_plugin in six.iteritems(_discovered_plugins):
         group = parser.add_argument_group(
             "Auth-system '%s' options" % name,
             conflict_handler="resolve")
@@ -76,7 +74,7 @@ def load_plugin(auth_system):
 
 
 def load_plugin_from_args(args):
-    """Load requred plugin and populate it with options.
+    """Load required plugin and populate it with options.
 
     Try to guess auth system if it is not specified. Systems are tried in
     alphabetical order.
@@ -91,7 +89,7 @@ def load_plugin_from_args(args):
         plugin.sufficient_options()
         return plugin
 
-    for plugin_auth_system in sorted(_discovered_plugins.iterkeys()):
+    for plugin_auth_system in sorted(six.iterkeys(_discovered_plugins)):
         plugin_class = _discovered_plugins[plugin_auth_system]
         plugin = plugin_class()
         plugin.parse_opts(args)
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 Jacob Kaplan-Moss
 # Copyright 2011 OpenStack Foundation
 # Copyright 2012 Grid Dynamics
@@ -26,10 +24,11 @@ Base utilities to build API operation managers and objects on top of.
 # pylint: disable=E1102
 
 import abc
 
 import six
-import urllib
 
 from rally.openstack.common.apiclient import exceptions
+from rally.openstack.common.py3kcompat import urlutils
 from rally.openstack.common import strutils
 
 
@@ -292,7 +291,7 @@ class CrudManager(BaseManager):
 
     def _filter_kwargs(self, kwargs):
         """Drop null values and handle ids."""
-        for key, ref in kwargs.copy().iteritems():
+        for key, ref in six.iteritems(kwargs.copy()):
             if ref is None:
                 kwargs.pop(key)
             else:
@@ -328,7 +327,7 @@ class CrudManager(BaseManager):
         return self._list(
             '%(base_url)s%(query)s' % {
                 'base_url': self.build_url(base_url=base_url, **kwargs),
-                'query': '?%s' % urllib.urlencode(kwargs) if kwargs else '',
+                'query': '?%s' % urlutils.urlencode(kwargs) if kwargs else '',
             },
             self.collection_key)
 
@@ -367,7 +366,7 @@ class CrudManager(BaseManager):
         rl = self._list(
             '%(base_url)s%(query)s' % {
                 'base_url': self.build_url(base_url=base_url, **kwargs),
-                'query': '?%s' % urllib.urlencode(kwargs) if kwargs else '',
+                'query': '?%s' % urlutils.urlencode(kwargs) if kwargs else '',
             },
             self.collection_key)
         num = len(rl)
@@ -446,7 +445,7 @@ class Resource(object):
         return None
 
     def _add_details(self, info):
-        for (k, v) in info.iteritems():
+        for (k, v) in six.iteritems(info):
             try:
                 setattr(self, k, v)
                 self._info[k] = v
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 Jacob Kaplan-Moss
 # Copyright 2011 OpenStack Foundation
 # Copyright 2011 Piston Cloud Computing, Inc.
@@ -52,7 +50,7 @@ class HTTPClient(object):
       services (e.g., for compute and image clients);
     - reissue authentication request for expired tokens;
     - encode/decode JSON bodies;
-    - raise exeptions on HTTP errors;
+    - raise exceptions on HTTP errors;
    - pluggable authentication;
     - store authentication information in a keyring;
     - store time spent for requests;
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 Jacob Kaplan-Moss
 # Copyright 2011 Nebula, Inc.
 # Copyright 2013 Alessio Ababilov
@@ -22,8 +20,11 @@
 Exception definitions.
 """
 
+import inspect
 import sys
 
+import six
 
 
 class ClientException(Exception):
     """The base exception class for all exceptions this library raises.
@@ -387,20 +388,12 @@ class HttpVersionNotSupported(HttpServerError):
     message = "HTTP Version Not Supported"
 
 
-# In Python 2.4 Exception is old-style and thus doesn't have a __subclasses__()
-# so we can do this:
-# _code_map = dict((c.http_status, c)
-#                  for c in HttpError.__subclasses__())
-_code_map = {}
-for obj in sys.modules[__name__].__dict__.values():
-    if isinstance(obj, type):
-        try:
-            http_status = obj.http_status
-        except AttributeError:
-            pass
-        else:
-            if http_status:
-                _code_map[http_status] = obj
+# _code_map contains all the classes that have http_status attribute.
+_code_map = dict(
+    (getattr(obj, 'http_status', None), obj)
+    for name, obj in six.iteritems(vars(sys.modules[__name__]))
+    if inspect.isclass(obj) and getattr(obj, 'http_status', False)
+)
 
 
 def from_response(response, method, url):
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2013 OpenStack Foundation
 # All Rights Reserved.
 #
@@ -27,11 +25,13 @@ places where actual behavior differs from the spec.
 # pylint: disable=W0102
 
 import json
-import urlparse
 
 import requests
+import six
 
 from rally.openstack.common.apiclient import client
+from rally.openstack.common.py3kcompat import urlutils
+from rally.openstack.common import strutils
 
 
 def assert_has_keys(dct, required=[], optional=[]):
@@ -63,6 +63,8 @@ class TestResponse(requests.Response):
             else:
                 self._content = text
             default_headers = {}
+            if six.PY3 and isinstance(self._content, six.string_types):
+                self._content = strutils.safe_encode(self._content)
             self.headers = data.get('headers') or default_headers
         else:
             self.status_code = data
@@ -146,7 +148,7 @@ class FakeHTTPClient(client.HTTPClient):
                                  "text": fixture[1]})
 
         # Call the method
-        args = urlparse.parse_qsl(urlparse.urlparse(url)[4])
+        args = urlutils.parse_qsl(urlutils.urlparse(url)[4])
         kwargs.update(args)
         munged_url = url.rsplit('?', 1)[0]
         munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_')
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -25,6 +23,8 @@ import sys
 import textwrap
 
 import prettytable
+import six
+from six import moves
 
 from rally.openstack.common.apiclient import exceptions
 from rally.openstack.common import strutils
@@ -141,9 +141,9 @@ def print_list(objs, fields, formatters=None, sortby_index=0,
     formatters = formatters or {}
     mixed_case_fields = mixed_case_fields or []
     if sortby_index is None:
-        sortby = None
+        kwargs = {}
     else:
-        sortby = fields[sortby_index]
+        kwargs = {'sortby': fields[sortby_index]}
     pt = prettytable.PrettyTable(fields, caching=False)
     pt.align = 'l'
 
@@ -161,7 +161,7 @@ def print_list(objs, fields, formatters=None, sortby_index=0,
             row.append(data)
         pt.add_row(row)
 
-    print(strutils.safe_encode(pt.get_string(sortby=sortby)))
+    print(strutils.safe_encode(pt.get_string(**kwargs)))
 
 
 def print_dict(dct, dict_property="Property", wrap=0):
@@ -173,7 +173,7 @@ def print_dict(dct, dict_property="Property", wrap=0):
     """
     pt = prettytable.PrettyTable([dict_property, 'Value'], caching=False)
     pt.align = 'l'
-    for k, v in dct.iteritems():
+    for k, v in six.iteritems(dct):
         # convert dict to str to check length
         if isinstance(v, dict):
             v = str(v)
@@ -181,7 +181,7 @@ def print_dict(dct, dict_property="Property", wrap=0):
             v = textwrap.fill(str(v), wrap)
         # if value has a newline, add in multiple rows
         # e.g. fault with stacktrace
-        if v and isinstance(v, basestring) and r'\n' in v:
+        if v and isinstance(v, six.string_types) and r'\n' in v:
             lines = v.strip().split(r'\n')
             col1 = k
             for line in lines:
@@ -199,7 +199,7 @@ def get_password(max_password_prompts=3):
     if hasattr(sys.stdin, "isatty") and sys.stdin.isatty():
         # Check for Ctrl-D
         try:
-            for _ in xrange(max_password_prompts):
+            for _ in moves.range(max_password_prompts):
                 pw1 = getpass.getpass("OS Password: ")
                 if verify:
                     pw2 = getpass.getpass("Please verify: ")
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 SINA Corporation
 # All Rights Reserved.
 #
@@ -28,6 +26,7 @@ import sys
 import textwrap
 
 from oslo.config import cfg
+import six
 
 from rally.openstack.common import gettextutils
 from rally.openstack.common import importutils
@@ -78,12 +77,15 @@ def generate(srcfiles):
     # The options list is a list of (module, options) tuples
     opts_by_group = {'DEFAULT': []}
 
-    for module_name in os.getenv(
-            "OSLO_CONFIG_GENERATOR_EXTRA_MODULES", "").split(','):
-        module = _import_module(module_name)
-        if module:
-            for group, opts in _list_opts(module):
-                opts_by_group.setdefault(group, []).append((module_name, opts))
+    extra_modules = os.getenv("RALLY_CONFIG_GENERATOR_EXTRA_MODULES", "")
+    if extra_modules:
+        for module_name in extra_modules.split(','):
+            module_name = module_name.strip()
+            module = _import_module(module_name)
+            if module:
+                for group, opts in _list_opts(module):
+                    opts_by_group.setdefault(group, []).append((module_name,
+                                                                opts))
 
     for pkg_name in pkg_names:
         mods = mods_by_pkg.get(pkg_name)
@@ -94,14 +96,14 @@ def generate(srcfiles):
 
             mod_obj = _import_module(mod_str)
             if not mod_obj:
-                continue
+                raise RuntimeError("Unable to import module %s" % mod_str)
 
             for group, opts in _list_opts(mod_obj):
                 opts_by_group.setdefault(group, []).append((mod_str, opts))
 
     print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
-    for group, opts in opts_by_group.items():
-        print_group_opts(group, opts)
+    for group in sorted(opts_by_group.keys()):
+        print_group_opts(group, opts_by_group[group])
 
 
 def _import_module(mod_str):
@@ -111,17 +113,17 @@ def _import_module(mod_str):
             return sys.modules[mod_str[4:]]
         else:
             return importutils.import_module(mod_str)
-    except ImportError as ie:
-        sys.stderr.write("%s\n" % str(ie))
-        return None
-    except Exception:
+    except Exception as e:
+        sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
         return None
 
 
 def _is_in_group(opt, group):
     "Check if opt is in group."
     for key, value in group._opts.items():
-        if value['opt'] == opt:
+        # NOTE(llu): Temporary workaround for bug #1262148, wait until
+        # newly released oslo.config support '==' operator.
+        if not(value['opt'] != opt):
             return True
     return False
 
@@ -221,11 +223,19 @@ def _print_opt(opt):
         sys.exit(1)
     opt_help += ' (' + OPT_TYPES[opt_type] + ')'
     print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
+    if opt.deprecated_opts:
+        for deprecated_opt in opt.deprecated_opts:
+            if deprecated_opt.name:
+                deprecated_group = (deprecated_opt.group if
+                                    deprecated_opt.group else "DEFAULT")
+                print('# Deprecated group/name - [%s]/%s' %
+                      (deprecated_group,
+                       deprecated_opt.name))
     try:
         if opt_default is None:
             print('#%s=<None>' % opt_name)
         elif opt_type == STROPT:
-            assert(isinstance(opt_default, basestring))
+            assert(isinstance(opt_default, six.string_types))
             print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
                                                           opt_default)))
         elif opt_type == BOOLOPT:
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 Cloudscaling Group, Inc
 # All Rights Reserved.
 #
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2013 Rackspace Hosting
 # All Rights Reserved.
 #
@@ -21,27 +19,15 @@ Supported configuration options:
 
 The following two parameters are in the 'database' group:
 `backend`: DB backend name or full module path to DB backend module.
-`use_tpool`: Enable thread pooling of DB API calls.
 
 A DB backend module should implement a method named 'get_backend' which
 takes no arguments. The method can return any object that implements DB
 API methods.
-
-*NOTE*: There are bugs in eventlet when using tpool combined with
-threading locks. The python logging module happens to use such locks. To
-work around this issue, be sure to specify thread=False with
-eventlet.monkey_patch().
-
-A bug for eventlet has been filed here:
-
-https://bitbucket.org/eventlet/eventlet/issue/137/
 """
-import functools
 
 from oslo.config import cfg
 
 from rally.openstack.common import importutils
-from rally.openstack.common import lockutils
 
 
 db_opts = [
@@ -50,12 +36,6 @@ db_opts = [
                deprecated_name='db_backend',
               deprecated_group='DEFAULT',
                help='The backend to use for db'),
-    cfg.BoolOpt('use_tpool',
-                default=False,
-                deprecated_name='dbapi_use_tpool',
-                deprecated_group='DEFAULT',
-                help='Enable the experimental use of thread pooling for '
-                     'all DB API calls')
 ]
 
 CONF = cfg.CONF
@@ -66,41 +46,12 @@ class DBAPI(object):
     def __init__(self, backend_mapping=None):
         if backend_mapping is None:
             backend_mapping = {}
-        self.__backend = None
-        self.__backend_mapping = backend_mapping
-
-    @lockutils.synchronized('dbapi_backend', 'rally-')
-    def __get_backend(self):
-        """Get the actual backend. May be a module or an instance of
-        a class. Doesn't matter to us. We do this synchronized as it's
-        possible multiple greenthreads started very quickly trying to do
-        DB calls and eventlet can switch threads before self.__backend gets
-        assigned.
-        """
-        if self.__backend:
-            # Another thread assigned it
-            return self.__backend
         backend_name = CONF.database.backend
-        self.__use_tpool = CONF.database.use_tpool
-        if self.__use_tpool:
-            from eventlet import tpool
-            self.__tpool = tpool
         # Import the untranslated name if we don't have a
         # mapping.
-        backend_path = self.__backend_mapping.get(backend_name,
-                                                  backend_name)
+        backend_path = backend_mapping.get(backend_name, backend_name)
         backend_mod = importutils.import_module(backend_path)
         self.__backend = backend_mod.get_backend()
-        return self.__backend
 
     def __getattr__(self, key):
-        backend = self.__backend or self.__get_backend()
-        attr = getattr(backend, key)
-        if not self.__use_tpool or not hasattr(attr, '__call__'):
-            return attr
-
-        def tpool_wrapper(*args, **kwargs):
-            return self.__tpool.execute(attr, *args, **kwargs)
-
-        functools.update_wrapper(tpool_wrapper, attr)
-        return tpool_wrapper
+        return getattr(self.__backend, key)
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -18,7 +16,7 @@
 
 """DB related custom exceptions."""
 
-from rally.openstack.common.gettextutils import _  # noqa
+from rally.openstack.common.gettextutils import _
 
 
 class DBError(Exception):
@@ -49,3 +47,8 @@ class DbMigrationError(DBError):
     """Wraps migration specific exception."""
     def __init__(self, message=None):
         super(DbMigrationError, self).__init__(str(message))
+
+
+class DBConnectionError(DBError):
+    """Wraps connection specific exception."""
+    pass
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 Cloudscaling Group, Inc
 # All Rights Reserved.
 #
@@ -36,53 +36,25 @@
 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
 
-import distutils.version as dist_version
 import os
 import re
 
-import migrate
 from migrate.changeset import ansisql
 from migrate.changeset.databases import sqlite
-from migrate.versioning import util as migrate_util
+from migrate import exceptions as versioning_exceptions
+from migrate.versioning import api as versioning_api
+from migrate.versioning.repository import Repository
 import sqlalchemy
 from sqlalchemy.schema import UniqueConstraint
 
 from rally.openstack.common.db import exception
 from rally.openstack.common.db.sqlalchemy import session as db_session
-from rally.openstack.common.gettextutils import _  # noqa
+from rally.openstack.common.gettextutils import _
 
 
-@migrate_util.decorator
-def patched_with_engine(f, *a, **kw):
-    url = a[0]
-    engine = migrate_util.construct_engine(url, **kw)
-
-    try:
-        kw['engine'] = engine
-        return f(*a, **kw)
-    finally:
-        if isinstance(engine, migrate_util.Engine) and engine is not url:
-            migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
-            engine.dispose()
-
-
-# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
-# on that version or higher, this can be removed
-MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
-if (not hasattr(migrate, '__version__') or
-        dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
-    migrate_util.with_engine = patched_with_engine
-
-
-# NOTE(jkoelker) Delay importing migrate until we are patched
-from migrate import exceptions as versioning_exceptions
-from migrate.versioning import api as versioning_api
-from migrate.versioning.repository import Repository
-
-_REPOSITORY = None
-
 get_engine = db_session.get_engine
 
 
@@ -270,9 +242,6 @@ def _find_migrate_repo(abs_path):
 
     :param abs_path: Absolute path to migrate repository
    """
-    global _REPOSITORY
     if not os.path.exists(abs_path):
        raise exception.DbMigrationError("Path %s not found" % abs_path)
-    if _REPOSITORY is None:
-        _REPOSITORY = Repository(abs_path)
-    return _REPOSITORY
+    return Repository(abs_path)
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
@@ -41,13 +39,13 @@ class ModelBase(object):
         if not session:
             session = sa.get_session()
         # NOTE(boris-42): This part of code should be look like:
-        #   sesssion.add(self)
+        #   session.add(self)
         #   session.flush()
         # But there is a bug in sqlalchemy and eventlet that
         # raises NoneType exception if there is no running
         # transaction and rollback is called. As long as
         # sqlalchemy has this bug we have to create transaction
-        # explicity.
+        # explicitly.
         with session.begin(subtransactions=True):
             session.add(self)
             session.flush()
@@ -61,7 +59,16 @@ class ModelBase(object):
     def get(self, key, default=None):
         return getattr(self, key, default)
 
-    def _get_extra_keys(self):
+    @property
+    def _extra_keys(self):
+        """Specifies custom fields
+
+        Subclasses can override this property to return a list
+        of custom fields that should be included in their dict
+        representation.
+
+        For reference check tests/db/sqlalchemy/test_models.py
+        """
         return []
 
     def __iter__(self):
@@ -69,7 +76,7 @@ class ModelBase(object):
         # NOTE(russellb): Allow models to specify other keys that can be looked
         # up, beyond the actual db columns. An example would be the 'name'
         # property for an Instance.
-        columns.extend(self._get_extra_keys())
+        columns.extend(self._extra_keys)
         self._i = iter(columns)
         return self
 
@@ -91,7 +98,7 @@ class ModelBase(object):
         joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
                        if not k[0] == '_'])
         local.update(joined)
-        return local.iteritems()
+        return six.iteritems(local)
 
 
 class TimestampMixin(object):
rally/openstack/common/db/sqlalchemy/provision.py (new file, 189 lines)
@@ -0,0 +1,189 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Mirantis.inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Provision test environment for specific DB backends"""
+
+import argparse
+import os
+import random
+import string
+
+from six import moves
+import sqlalchemy
+
+from rally.openstack.common.db import exception as exc
+
+
+SQL_CONNECTION = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://')
+
+
+def _gen_credentials(*names):
+    """Generate credentials."""
+    auth_dict = {}
+    for name in names:
+        val = ''.join(random.choice(string.ascii_lowercase)
+                      for i in moves.range(10))
+        auth_dict[name] = val
+    return auth_dict
+
+
+def _get_engine(uri=SQL_CONNECTION):
+    """Engine creation
+
+    By default the uri is SQL_CONNECTION which is admin credentials.
+    Call the function without arguments to get admin connection. Admin
+    connection required to create temporary user and database for each
+    particular test. Otherwise use existing connection to recreate connection
+    to the temporary database.
+    """
+    return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool)
+
+
+def _execute_sql(engine, sql, driver):
+    """Initialize connection, execute sql query and close it."""
+    try:
+        with engine.connect() as conn:
+            if driver == 'postgresql':
+                conn.connection.set_isolation_level(0)
+            for s in sql:
+                conn.execute(s)
+    except sqlalchemy.exc.OperationalError:
+        msg = ('%s does not match database admin '
+               'credentials or database does not exist.')
+        raise exc.DBConnectionError(msg % SQL_CONNECTION)
+
+
+def create_database(engine):
+    """Provide temporary user and database for each particular test."""
+    driver = engine.name
+
+    auth = _gen_credentials('database', 'user', 'passwd')
+
+    sqls = {
+        'mysql': [
+            "drop database if exists %(database)s;",
+            "grant all on %(database)s.* to '%(user)s'@'localhost'"
+            " identified by '%(passwd)s';",
+            "create database %(database)s;",
+        ],
+        'postgresql': [
+            "drop database if exists %(database)s;",
+            "drop user if exists %(user)s;",
+            "create user %(user)s with password '%(passwd)s';",
+            "create database %(database)s owner %(user)s;",
+        ]
+    }
+
+    if driver == 'sqlite':
+        return 'sqlite:////tmp/%s' % auth['database']
+
+    try:
+        sql_rows = sqls[driver]
+    except KeyError:
+        raise ValueError('Unsupported RDBMS %s' % driver)
+    sql_query = map(lambda x: x % auth, sql_rows)
+
+    _execute_sql(engine, sql_query, driver)
+
+    params = auth.copy()
+    params['backend'] = driver
+    return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params
+
+
+def drop_database(engine, current_uri):
+    """Drop temporary database and user after each particular test."""
+    engine = _get_engine(current_uri)
+    admin_engine = _get_engine()
+    driver = engine.name
+    auth = {'database': engine.url.database, 'user': engine.url.username}
+
+    if driver == 'sqlite':
+        try:
+            os.remove(auth['database'])
+        except OSError:
+            pass
+        return
+
+    sqls = {
+        'mysql': [
+            "drop database if exists %(database)s;",
+            "drop user '%(user)s'@'localhost';",
+        ],
+        'postgresql': [
+            "drop database if exists %(database)s;",
+            "drop user if exists %(user)s;",
+        ]
+    }
+
+    try:
+        sql_rows = sqls[driver]
+    except KeyError:
+        raise ValueError('Unsupported RDBMS %s' % driver)
+    sql_query = map(lambda x: x % auth, sql_rows)
+
+    _execute_sql(admin_engine, sql_query, driver)
+
+
+def main():
+    """Controller to handle commands
+
+    ::create: Create test user and database with random names.
+    ::drop: Drop user and database created by previous command.
+    """
+    parser = argparse.ArgumentParser(
+        description='Controller to handle database creation and dropping'
+        ' commands.',
+        epilog='Under normal circumstances is not used directly.'
+        ' Used in .testr.conf to automate test database creation'
+        ' and dropping processes.')
+    subparsers = parser.add_subparsers(
+        help='Subcommands to manipulate temporary test databases.')
+
+    create = subparsers.add_parser(
+        'create',
+        help='Create temporary test '
+        'databases and users.')
+    create.set_defaults(which='create')
+    create.add_argument(
+        'instances_count',
+        type=int,
+        help='Number of databases to create.')
+
+    drop = subparsers.add_parser(
+        'drop',
+        help='Drop temporary test databases and users.')
+    drop.set_defaults(which='drop')
+    drop.add_argument(
+        'instances',
+        nargs='+',
+        help='List of databases uri to be dropped.')
+
+    args = parser.parse_args()
+
+    engine = _get_engine()
+    which = args.which
+
+    if which == "create":
+        for i in range(int(args.instances_count)):
+            print(create_database(engine))
+    elif which == "drop":
+        for db in args.instances:
+            drop_database(engine, db)
+
+
+if __name__ == "__main__":
+    main()
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -23,7 +21,7 @@ Initializing:
 * Call set_defaults with the minimal of the following kwargs:
     sql_connection, sqlite_db
 
-  Example:
+  Example::
 
     session.set_defaults(
         sql_connection="sqlite:///var/lib/rally/sqlite.db",
@@ -44,17 +42,17 @@ Recommended ways to use sessions within this framework:
   functionality should be handled at a logical level. For an example, look at
   the code around quotas and reservation_rollback().
 
-  Examples:
+  Examples::
 
     def get_foo(context, foo):
-        return model_query(context, models.Foo).\
-            filter_by(foo=foo).\
-            first()
+        return (model_query(context, models.Foo).
+                filter_by(foo=foo).
+                first())
 
     def update_foo(context, id, newfoo):
-        model_query(context, models.Foo).\
-            filter_by(id=id).\
-            update({'foo': newfoo})
+        (model_query(context, models.Foo).
+         filter_by(id=id).
+         update({'foo': newfoo}))
 
     def create_foo(context, values):
         foo_ref = models.Foo()
@@ -68,14 +66,21 @@ Recommended ways to use sessions within this framework:
   handler will take care of calling flush() and commit() for you.
   If using this approach, you should not explicitly call flush() or commit().
  Any error within the context of the session will cause the session to emit
-  a ROLLBACK. If the connection is dropped before this is possible, the
-  database will implicitly rollback the transaction.
+  a ROLLBACK. Database Errors like IntegrityError will be raised in
+  session's __exit__ handler, and any try/except within the context managed
+  by session will not be triggered. And catching other non-database errors in
+  the session will not trigger the ROLLBACK, so exception handlers should
+  always be outside the session, unless the developer wants to do a partial
+  commit on purpose. If the connection is dropped before this is possible,
+  the database will implicitly roll back the transaction.
 
   Note: statements in the session scope will not be automatically retried.
 
  If you create models within the session, they need to be added, but you
   do not need to call model.save()
 
+  ::
+
     def create_many_foo(context, foos):
         session = get_session()
         with session.begin():
@@ -87,33 +92,50 @@ Recommended ways to use sessions within this framework:
     def update_bar(context, foo_id, newbar):
         session = get_session()
         with session.begin():
-            foo_ref = model_query(context, models.Foo, session).\
-                          filter_by(id=foo_id).\
-                          first()
-            model_query(context, models.Bar, session).\
-                      filter_by(id=foo_ref['bar_id']).\
-                      update({'bar': newbar})
+            foo_ref = (model_query(context, models.Foo, session).
+                       filter_by(id=foo_id).
+                       first())
+            (model_query(context, models.Bar, session).
+             filter_by(id=foo_ref['bar_id']).
+             update({'bar': newbar}))
 
   Note: update_bar is a trivially simple example of using "with session.begin".
   Whereas create_many_foo is a good example of when a transaction is needed,
   it is always best to use as few queries as possible. The two queries in
   update_bar can be better expressed using a single query which avoids
-  the need for an explicit transaction. It can be expressed like so:
+  the need for an explicit transaction. It can be expressed like so::
 
     def update_bar(context, foo_id, newbar):
-        subq = model_query(context, models.Foo.id).\
-            filter_by(id=foo_id).\
-            limit(1).\
-            subquery()
-        model_query(context, models.Bar).\
-            filter_by(id=subq.as_scalar()).\
-            update({'bar': newbar})
+        subq = (model_query(context, models.Foo.id).
+                filter_by(id=foo_id).
+                limit(1).
+                subquery())
+        (model_query(context, models.Bar).
+         filter_by(id=subq.as_scalar()).
+         update({'bar': newbar}))
 
-  For reference, this emits approximagely the following SQL statement:
+  For reference, this emits approximately the following SQL statement::
 
     UPDATE bar SET bar = ${newbar}
         WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
 
+  Note: create_duplicate_foo is a trivially simple example of catching an
+  exception while using "with session.begin". Here create two duplicate
+  instances with same primary key, must catch the exception out of context
+  managed by a single session:
+
+    def create_duplicate_foo(context):
+        foo1 = models.Foo()
+        foo2 = models.Foo()
+        foo1.id = foo2.id = 1
+        session = get_session()
+        try:
+            with session.begin():
+                session.add(foo1)
+                session.add(foo2)
+        except exception.DBDuplicateEntry as e:
+            handle_error(e)
+
 * Passing an active session between methods. Sessions should only be passed
   to private methods. The private method must use a subtransaction; otherwise
   SQLAlchemy will throw an error when you call session.begin() on an existing
@@ -129,6 +151,8 @@ Recommended ways to use sessions within this framework:
   becomes less clear in this situation. When this is needed for code clarity,
   it should be clearly documented.
 
+  ::
+
     def myfunc(foo):
         session = get_session()
         with session.begin():
@@ -173,7 +197,7 @@ There are some things which it is best to avoid:
 Enabling soft deletes:
 
 * To use/enable soft-deletes, the SoftDeleteMixin must be added
-  to your model class. For example:
+  to your model class. For example::
 
       class NovaBase(models.SoftDeleteMixin, models.ModelBase):
           pass
@@ -181,14 +205,15 @@ Enabling soft deletes:
 
 Efficient use of soft deletes:
 
-* There are two possible ways to mark a record as deleted:
+* There are two possible ways to mark a record as deleted::
+
     model.soft_delete() and query.soft_delete().
 
  model.soft_delete() method works with single already fetched entry.
  query.soft_delete() makes only one db request for all entries that correspond
  to query.
 
-* In almost all cases you should use query.soft_delete(). Some examples:
+* In almost all cases you should use query.soft_delete(). Some examples::
 
     def soft_delete_bar():
         count = model_query(BarModel).find(some_condition).soft_delete()
@@ -199,9 +224,9 @@ Efficient use of soft deletes:
         if session is None:
             session = get_session()
         with session.begin(subtransactions=True):
-            count = model_query(BarModel).\
-                find(some_condition).\
-                soft_delete(synchronize_session=True)
+            count = (model_query(BarModel).
+                     find(some_condition).
+                     soft_delete(synchronize_session=True))
             # Here synchronize_session is required, because we
            # don't know what is going on in outer session.
             if count == 0:
@@ -211,6 +236,8 @@ Efficient use of soft deletes:
   you fetch a single record, work with it, and mark it as deleted in the same
   transaction.
 
+  ::
+
     def soft_delete_bar_model():
         session = get_session()
         with session.begin():
@@ -219,13 +246,13 @@ Efficient use of soft deletes:
             bar_ref.soft_delete(session=session)
 
   However, if you need to work with all entries that correspond to query and
-  then soft delete them you should use query.soft_delete() method:
+  then soft delete them you should use query.soft_delete() method::
 
     def soft_delete_multi_models():
         session = get_session()
         with session.begin():
-            query = model_query(BarModel, session=session).\
-                find(some_condition)
+            query = (model_query(BarModel, session=session).
+                     find(some_condition))
             model_refs = query.all()
             # Work with model_refs
            query.soft_delete(synchronize_session=False)
@@ -236,6 +263,8 @@ Efficient use of soft deletes:
   which issues a single query. Using model.soft_delete(), as in the following
   example, is very inefficient.
 
+  ::
+
     for bar_ref in bar_refs:
         bar_ref.soft_delete(session=session)
     # This will produce count(bar_refs) db requests.
@@ -249,14 +278,13 @@ import time
|
|||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
import six
|
import six
|
||||||
from sqlalchemy import exc as sqla_exc
|
from sqlalchemy import exc as sqla_exc
|
||||||
import sqlalchemy.interfaces
|
|
||||||
from sqlalchemy.interfaces import PoolListener
|
from sqlalchemy.interfaces import PoolListener
|
||||||
import sqlalchemy.orm
|
import sqlalchemy.orm
|
||||||
from sqlalchemy.pool import NullPool, StaticPool
|
from sqlalchemy.pool import NullPool, StaticPool
|
||||||
from sqlalchemy.sql.expression import literal_column
|
from sqlalchemy.sql.expression import literal_column
|
||||||
|
|
||||||
from rally.openstack.common.db import exception
|
from rally.openstack.common.db import exception
|
||||||
from rally.openstack.common.gettextutils import _ # noqa
|
from rally.openstack.common.gettextutils import _
|
||||||
from rally.openstack.common import log as logging
|
from rally.openstack.common import log as logging
|
||||||
from rally.openstack.common import timeutils
|
from rally.openstack.common import timeutils
|
||||||
|
|
||||||
@@ -291,7 +319,9 @@ database_opts = [
|
|||||||
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
|
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
|
||||||
group='DEFAULT'),
|
group='DEFAULT'),
|
||||||
cfg.DeprecatedOpt('sql_idle_timeout',
|
cfg.DeprecatedOpt('sql_idle_timeout',
|
||||||
group='DATABASE')],
|
group='DATABASE'),
|
||||||
|
cfg.DeprecatedOpt('idle_timeout',
|
||||||
|
group='sql')],
|
||||||
help='timeout before idle sql connections are reaped'),
|
help='timeout before idle sql connections are reaped'),
|
||||||
cfg.IntOpt('min_pool_size',
|
cfg.IntOpt('min_pool_size',
|
||||||
default=1,
|
default=1,
|
||||||
@@ -409,8 +439,8 @@ class SqliteForeignKeysListener(PoolListener):
|
|||||||
dbapi_con.execute('pragma foreign_keys=ON')
|
dbapi_con.execute('pragma foreign_keys=ON')
|
||||||
|
|
||||||
|
|
||||||
def get_session(autocommit=True, expire_on_commit=False,
|
def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
|
||||||
sqlite_fk=False, slave_session=False):
|
slave_session=False, mysql_traditional_mode=False):
|
||||||
"""Return a SQLAlchemy session."""
|
"""Return a SQLAlchemy session."""
|
||||||
global _MAKER
|
global _MAKER
|
||||||
global _SLAVE_MAKER
|
global _SLAVE_MAKER
|
||||||
@@ -420,7 +450,8 @@ def get_session(autocommit=True, expire_on_commit=False,
|
|||||||
maker = _SLAVE_MAKER
|
maker = _SLAVE_MAKER
|
||||||
|
|
||||||
if maker is None:
|
if maker is None:
|
||||||
engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session)
|
engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session,
|
||||||
|
mysql_traditional_mode=mysql_traditional_mode)
|
||||||
maker = get_maker(engine, autocommit, expire_on_commit)
|
maker = get_maker(engine, autocommit, expire_on_commit)
|
||||||
|
|
||||||
if slave_session:
|
if slave_session:
|
||||||
@@ -439,6 +470,11 @@ def get_session(autocommit=True, expire_on_commit=False,
 # 1 column - (IntegrityError) column c1 is not unique
 # N columns - (IntegrityError) column c1, c2, ..., N are not unique
 #
+# sqlite since 3.7.16:
+# 1 column - (IntegrityError) UNIQUE constraint failed: k1
+#
+# N columns - (IntegrityError) UNIQUE constraint failed: k1, k2
+#
 # postgres:
 # 1 column - (IntegrityError) duplicate key value violates unique
 #  constraint "users_c1_key"
@@ -451,9 +487,10 @@ def get_session(autocommit=True, expire_on_commit=False,
 # N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
 #               with -' for key 'name_of_our_constraint'")
 _DUP_KEY_RE_DB = {
-    "sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
-    "postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
-    "mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
+    "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
+               re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
+    "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
+    "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),)
 }
@@ -483,10 +520,14 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
     # SQLAlchemy can differ when using unicode() and accessing .message.
     # An audit across all three supported engines will be necessary to
     # ensure there are no regressions.
-    m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message)
-    if not m:
+    for pattern in _DUP_KEY_RE_DB[engine_name]:
+        match = pattern.match(integrity_error.message)
+        if match:
+            break
+    else:
         return
-    columns = m.group(1)
+
+    columns = match.group(1)

     if engine_name == "sqlite":
         columns = columns.strip().split(", ")
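A minimal, self-contained sketch of the for/else matching introduced above (the function name and patterns here are illustrative, not part of the commit): the loop breaks on the first pattern that matches, and the else branch runs only when none did.

import re

# Hypothetical patterns mirroring the tuple-per-engine idea above.
patterns = (
    re.compile(r"columns?\s+(.+)\s+(is|are)\s+not\s+unique$"),
    re.compile(r"UNIQUE\s+constraint\s+failed:\s+(.+)$"),
)


def extract_columns(error_message):
    # Try each pattern in turn; 'else' only runs if no 'break' happened.
    for pattern in patterns:
        match = pattern.search(error_message)
        if match:
            break
    else:
        return None
    return match.group(1)


print(extract_columns("UNIQUE constraint failed: users.name"))  # users.name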
@@ -555,7 +596,8 @@ def _wrap_db_error(f):
     return _wrap


-def get_engine(sqlite_fk=False, slave_engine=False):
+def get_engine(sqlite_fk=False, slave_engine=False,
+               mysql_traditional_mode=False):
     """Return a SQLAlchemy engine."""
     global _ENGINE
     global _SLAVE_ENGINE
@@ -567,8 +609,8 @@ def get_engine(sqlite_fk=False, slave_engine=False):
         db_uri = CONF.database.slave_connection

     if engine is None:
-        engine = create_engine(db_uri,
-                               sqlite_fk=sqlite_fk)
+        engine = create_engine(db_uri, sqlite_fk=sqlite_fk,
+                               mysql_traditional_mode=mysql_traditional_mode)
     if slave_engine:
         _SLAVE_ENGINE = engine
     else:
@@ -603,34 +645,53 @@ def _thread_yield(dbapi_con, con_record):
     time.sleep(0)


-def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
-    """Ensures that MySQL connections checked out of the pool are alive.
+def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
+    """Ensures that MySQL and DB2 connections are alive.

     Borrowed from:
     http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
     """
+    cursor = dbapi_conn.cursor()
     try:
-        dbapi_conn.cursor().execute('select 1')
-    except dbapi_conn.OperationalError as ex:
-        if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
-            LOG.warn(_('Got mysql server has gone away: %s'), ex)
-            raise sqla_exc.DisconnectionError("Database server went away")
+        ping_sql = 'select 1'
+        if engine.name == 'ibm_db_sa':
+            # DB2 requires a table expression
+            ping_sql = 'select 1 from (values (1)) AS t1'
+        cursor.execute(ping_sql)
+    except Exception as ex:
+        if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
+            msg = _('Database server has gone away: %s') % ex
+            LOG.warning(msg)
+            raise sqla_exc.DisconnectionError(msg)
         else:
             raise


+def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy):
+    """Set engine mode to 'traditional'.
+
+    Required to prevent silent truncates at insert or update operations
+    under MySQL. By default MySQL truncates inserted string if it longer
+    than a declared field just with warning. That is fraught with data
+    corruption.
+    """
+    dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;")
+
+
 def _is_db_connection_error(args):
     """Return True if error in connecting to db."""
     # NOTE(adam_g): This is currently MySQL specific and needs to be extended
     #               to support Postgres and others.
-    conn_err_codes = ('2002', '2003', '2006')
+    # For the db2, the error code is -30081 since the db2 is still not ready
+    conn_err_codes = ('2002', '2003', '2006', '-30081')
     for err_code in conn_err_codes:
         if args.find(err_code) != -1:
             return True
     return False


-def create_engine(sql_connection, sqlite_fk=False):
+def create_engine(sql_connection, sqlite_fk=False,
+                  mysql_traditional_mode=False):
     """Return a new SQLAlchemy engine."""
     # NOTE(geekinutah): At this point we could be connecting to the normal
     #                   db handle or the slave db handle. Things like
@@ -671,8 +732,16 @@ def create_engine(sql_connection, sqlite_fk=False):

     sqlalchemy.event.listen(engine, 'checkin', _thread_yield)

-    if 'mysql' in connection_dict.drivername:
-        sqlalchemy.event.listen(engine, 'checkout', _ping_listener)
+    if engine.name in ['mysql', 'ibm_db_sa']:
+        callback = functools.partial(_ping_listener, engine)
+        sqlalchemy.event.listen(engine, 'checkout', callback)
+        if mysql_traditional_mode:
+            sqlalchemy.event.listen(engine, 'checkout', _set_mode_traditional)
+        else:
+            LOG.warning(_("This application has not enabled MySQL traditional"
+                          " mode, which means silent data corruption may"
+                          " occur. Please encourage the application"
+                          " developers to enable this mode."))
     elif 'sqlite' in connection_dict.drivername:
         if not CONF.sqlite_synchronous:
             sqlalchemy.event.listen(engine, 'connect',
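The engine argument added to _ping_listener is bound at registration time with functools.partial, as in the checkout hook above. A small standalone sketch of that binding pattern (the callback and names are illustrative, not from the commit):

import functools


def ping_listener(engine_name, dbapi_conn):
    # The first argument is pre-bound; the event system would supply the rest.
    print("pinging %s connection %r" % (engine_name, dbapi_conn))


callback = functools.partial(ping_listener, "mysql")
callback("fake-connection")  # pinging mysql connection 'fake-connection'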
@@ -694,7 +763,7 @@ def create_engine(sql_connection, sqlite_fk=False):
         remaining = 'infinite'
     while True:
         msg = _('SQL connection failed. %s attempts left.')
-        LOG.warn(msg % remaining)
+        LOG.warning(msg % remaining)
         if remaining != 'infinite':
             remaining -= 1
         time.sleep(CONF.database.retry_interval)
@@ -753,25 +822,25 @@ def _patch_mysqldb_with_stacktrace_comments():

     def _do_query(self, q):
         stack = ''
-        for file, line, method, function in traceback.extract_stack():
+        for filename, line, method, function in traceback.extract_stack():
             # exclude various common things from trace
-            if file.endswith('session.py') and method == '_do_query':
+            if filename.endswith('session.py') and method == '_do_query':
                 continue
-            if file.endswith('api.py') and method == 'wrapper':
+            if filename.endswith('api.py') and method == 'wrapper':
                 continue
-            if file.endswith('utils.py') and method == '_inner':
+            if filename.endswith('utils.py') and method == '_inner':
                 continue
-            if file.endswith('exception.py') and method == '_wrap':
+            if filename.endswith('exception.py') and method == '_wrap':
                 continue
             # db/api is just a wrapper around db/sqlalchemy/api
-            if file.endswith('db/api.py'):
+            if filename.endswith('db/api.py'):
                 continue
             # only trace inside rally
-            index = file.rfind('rally')
+            index = filename.rfind('rally')
             if index == -1:
                 continue
             stack += "File:%s:%s Method:%s() Line:%s | " \
-                     % (file[index:], line, method, function)
+                     % (filename[index:], line, method, function)

         # strip trailing " | " from stack
         if stack:
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010-2011 OpenStack Foundation
 # Copyright 2012-2013 IBM Corp.
 # All Rights Reserved.
@@ -16,17 +14,18 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import commands
-import ConfigParser
+import functools
 import os
-import urlparse
+import subprocess

+import lockfile
+from six import moves
 import sqlalchemy
 import sqlalchemy.exc

-from rally.openstack.common import lockutils
+from rally.openstack.common.gettextutils import _
 from rally.openstack.common import log as logging
+from rally.openstack.common.py3kcompat import urlutils
 from rally.openstack.common import test

 LOG = logging.getLogger(__name__)
@@ -93,6 +92,22 @@ def get_db_connection_info(conn_pieces):
     return (user, password, database, host)


+def _set_db_lock(lock_path=None, lock_prefix=None):
+    def decorator(f):
+        @functools.wraps(f)
+        def wrapper(*args, **kwargs):
+            try:
+                path = lock_path or os.environ.get("RALLY_LOCK_PATH")
+                lock = lockfile.FileLock(os.path.join(path, lock_prefix))
+                with lock:
+                    LOG.debug(_('Got lock "%s"') % f.__name__)
+                    return f(*args, **kwargs)
+            finally:
+                LOG.debug(_('Lock released "%s"') % f.__name__)
+        return wrapper
+    return decorator
+
+
 class BaseMigrationTestCase(test.BaseTestCase):
     """Base class fort testing of migration utils."""
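A sketch of the file-lock pattern that _set_db_lock builds on, assuming the third-party lockfile package imported above; the lock name and path here are illustrative.

import os
import tempfile

import lockfile

# Hypothetical lock location for the example; the decorator above reads
# RALLY_LOCK_PATH instead.
lock_path = tempfile.gettempdir()
lock = lockfile.FileLock(os.path.join(lock_path, "migration_tests-demo"))

with lock:
    # Only one process at a time gets past this point.
    print("lock held; database reset would run here")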
@@ -115,13 +130,13 @@ class BaseMigrationTestCase(test.BaseTestCase):
         # once. No need to re-run this on each test...
         LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
         if os.path.exists(self.CONFIG_FILE_PATH):
-            cp = ConfigParser.RawConfigParser()
+            cp = moves.configparser.RawConfigParser()
             try:
                 cp.read(self.CONFIG_FILE_PATH)
                 defaults = cp.defaults()
                 for key, value in defaults.items():
                     self.test_databases[key] = value
-            except ConfigParser.ParsingError as e:
+            except moves.configparser.ParsingError as e:
                 self.fail("Failed to read test_migrations.conf config "
                           "file. Got error: %s" % e)
         else:
@@ -143,12 +158,13 @@ class BaseMigrationTestCase(test.BaseTestCase):
         super(BaseMigrationTestCase, self).tearDown()

     def execute_cmd(self, cmd=None):
-        status, output = commands.getstatusoutput(cmd)
+        process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+                                   stderr=subprocess.STDOUT)
+        output = process.communicate()[0]
         LOG.debug(output)
-        self.assertEqual(0, status,
+        self.assertEqual(0, process.returncode,
                          "Failed to run: %s\n%s" % (cmd, output))

-    @lockutils.synchronized('pgadmin', 'tests-', external=True)
     def _reset_pg(self, conn_pieces):
         (user, password, database, host) = get_db_connection_info(conn_pieces)
         os.environ['PGPASSWORD'] = password
@@ -170,10 +186,11 @@ class BaseMigrationTestCase(test.BaseTestCase):
         os.unsetenv('PGPASSWORD')
         os.unsetenv('PGUSER')

+    @_set_db_lock(lock_prefix='migration_tests-')
     def _reset_databases(self):
         for key, engine in self.engines.items():
             conn_string = self.test_databases[key]
-            conn_pieces = urlparse.urlparse(conn_string)
+            conn_pieces = urlutils.urlparse(conn_string)
             engine.dispose()
             if conn_string.startswith('sqlite'):
                 # We can just delete the SQLite database, which is
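The execute_cmd() change above swaps the removed commands.getstatusoutput() for subprocess. A standalone sketch of that replacement (the command is illustrative):

import subprocess

cmd = "echo hello"
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
output = process.communicate()[0]
# returncode is only populated after communicate() has waited for the process.
assert process.returncode == 0, "Failed to run: %s\n%s" % (cmd, output)
print(output)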
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2010-2011 OpenStack Foundation.
@@ -38,7 +36,7 @@ from sqlalchemy import String
 from sqlalchemy import Table
 from sqlalchemy.types import NullType

-from rally.openstack.common.gettextutils import _  # noqa
+from rally.openstack.common.gettextutils import _
 from rally.openstack.common import log as logging
 from rally.openstack.common import timeutils
@@ -96,7 +94,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
     if 'id' not in sort_keys:
         # TODO(justinsb): If this ever gives a false-positive, check
         # the actual primary key, rather than assuming its id
-        LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))
+        LOG.warning(_('Id not in sort_keys; is sort_keys unique?'))

     assert(not (sort_dir and sort_dirs))
@@ -135,9 +133,9 @@ def paginate_query(query, model, limit, sort_keys, marker=None,

     # Build up an array of sort criteria as in the docstring
     criteria_list = []
-    for i in range(0, len(sort_keys)):
+    for i in range(len(sort_keys)):
         crit_attrs = []
-        for j in range(0, i):
+        for j in range(i):
             model_attr = getattr(model, sort_keys[j])
             crit_attrs.append((model_attr == marker_values[j]))
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # Copyright 2012, Red Hat, Inc.
 #
@@ -26,7 +24,7 @@ import traceback

 import six

-from rally.openstack.common.gettextutils import _  # noqa
+from rally.openstack.common.gettextutils import _


 class save_and_reraise_exception(object):
@@ -44,13 +42,13 @@ class save_and_reraise_exception(object):

     In some cases the caller may not want to re-raise the exception, and
     for those circumstances this context provides a reraise flag that
-    can be used to suppress the exception. For example:
+    can be used to suppress the exception. For example::

       except Exception:
           with save_and_reraise_exception() as ctxt:
               decide_if_need_reraise()
               if not should_be_reraised:
                   ctxt.reraise = False
     """
     def __init__(self):
         self.reraise = True
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -22,7 +20,7 @@ import os
 import tempfile

 from rally.openstack.common import excutils
-from rally.openstack.common.gettextutils import _  # noqa
+from rally.openstack.common.gettextutils import _
 from rally.openstack.common import log as logging

 LOG = logging.getLogger(__name__)
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
 #
 # Copyright 2013 Mirantis, Inc.
 # Copyright 2013 OpenStack Foundation
@@ -30,7 +29,7 @@ class Config(fixtures.Fixture):
     the specified configuration option group.

     All overrides are automatically cleared at the end of the current
-    test by the reset() method, which is registred by addCleanup().
+    test by the reset() method, which is registered by addCleanup().
     """

     def __init__(self, conf=cfg.CONF):
rally/openstack/common/fixture/lockutils.py (new file, 51 lines)
@@ -0,0 +1,51 @@
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+
+from rally.openstack.common import lockutils
+
+
+class LockFixture(fixtures.Fixture):
+    """External locking fixture.
+
+    This fixture is basically an alternative to the synchronized decorator with
+    the external flag so that tearDowns and addCleanups will be included in
+    the lock context for locking between tests. The fixture is recommended to
+    be the first line in a test method, like so::
+
+        def test_method(self):
+            self.useFixture(LockFixture)
+                ...
+
+    or the first line in setUp if all the test methods in the class are
+    required to be serialized. Something like::
+
+        class TestCase(testtools.testcase):
+            def setUp(self):
+                self.useFixture(LockFixture)
+                super(TestCase, self).setUp()
+                ...
+
+    This is because addCleanups are put on a LIFO queue that gets run after the
+    test method exits. (either by completing or raising an exception)
+    """
+    def __init__(self, name, lock_file_prefix=None):
+        self.mgr = lockutils.lock(name, lock_file_prefix, True)
+
+    def setUp(self):
+        super(LockFixture, self).setUp()
+        self.addCleanup(self.mgr.__exit__, None, None, None)
+        self.mgr.__enter__()
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2013 Hewlett-Packard Development Company, L.P.
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2013 Hewlett-Packard Development Company, L.P.
@@ -19,7 +17,6 @@

 import fixtures
 import mox
-import stubout


 class MoxStubout(fixtures.Fixture):
@@ -30,8 +27,6 @@ class MoxStubout(fixtures.Fixture):
         # emulate some of the mox stuff, we can't use the metaclass
         # because it screws with our generators
         self.mox = mox.Mox()
-        self.stubs = stubout.StubOutForTesting()
+        self.stubs = self.mox.stubs
         self.addCleanup(self.mox.UnsetStubs)
-        self.addCleanup(self.stubs.UnsetAll)
-        self.addCleanup(self.stubs.SmartUnsetAll)
         self.addCleanup(self.mox.VerifyAll)
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 Red Hat, Inc.
 # Copyright 2013 IBM Corp.
 # All Rights Reserved.
@@ -317,7 +315,7 @@ def get_available_languages(domain):
     # NOTE(luisg): Babel <1.0 used a function called list(), which was
     # renamed to locale_identifiers() in >=1.0, the requirements master list
     # requires >=0.9.6, uncapped, so defensively work with both. We can remove
-    # this check when the master list updates to >=1.0, and all projects udpate
+    # this check when the master list updates to >=1.0, and update all projects
     list_identifiers = (getattr(localedata, 'list', None) or
                         getattr(localedata, 'locale_identifiers'))
     locale_identifiers = list_identifiers()
@@ -329,13 +327,21 @@ def get_available_languages(domain):


 def get_localized_message(message, user_locale):
-    """Gets a localized version of the given message in the given locale."""
+    """Gets a localized version of the given message in the given locale.
+
+    If the message is not a Message object the message is returned as-is.
+    If the locale is None the message is translated to the default locale.
+
+    :returns: the translated message in unicode, or the original message if
+              it could not be translated
+    """
+    translated = message
     if isinstance(message, Message):
-        if user_locale:
-            message.locale = user_locale
-        return six.text_type(message)
-    else:
-        return message
+        original_locale = message.locale
+        message.locale = user_locale
+        translated = six.text_type(message)
+        message.locale = original_locale
+    return translated


 class LocaleHandler(logging.Handler):
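The get_available_languages() hunk above relies on a defensive getattr() lookup so both old and new Babel APIs work without version checks. A small illustrative sketch of that fallback (the two classes stand in for the two Babel versions and are not real Babel objects):

class OldLocaleData(object):
    @staticmethod
    def list():
        return ['en', 'es']


class NewLocaleData(object):
    @staticmethod
    def locale_identifiers():
        return ['en', 'es']


for localedata in (OldLocaleData, NewLocaleData):
    # Prefer the pre-1.0 name, fall back to the >=1.0 name.
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    print(list_identifiers())  # ['en', 'es'] either way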
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2011 Justin Santa Barbara
@@ -41,8 +39,12 @@ import json
 try:
     import xmlrpclib
 except ImportError:
-    # NOTE(jd): xmlrpclib is not shipped with Python 3
-    xmlrpclib = None
+    # NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3
+    #                 however the function and object call signatures
+    #                 remained the same. This whole try/except block should
+    #                 be removed and replaced with a call to six.moves once
+    #                 six 1.4.2 is released. See http://bit.ly/1bqrVzu
+    import xmlrpc.client as xmlrpclib

 import six

@@ -124,14 +126,14 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
                              level=level,
                              max_depth=max_depth)
     if isinstance(value, dict):
-        return dict((k, recursive(v)) for k, v in value.iteritems())
+        return dict((k, recursive(v)) for k, v in six.iteritems(value))
     elif isinstance(value, (list, tuple)):
         return [recursive(lv) for lv in value]

     # It's not clear why xmlrpclib created their own DateTime type, but
     # for our purposes, make it a datetime type which is explicitly
     # handled
-    if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
+    if isinstance(value, xmlrpclib.DateTime):
         value = datetime.datetime(*tuple(value.timetuple())[:6])

     if convert_datetime and isinstance(value, datetime.datetime):
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
@@ -20,15 +18,18 @@ import contextlib
 import errno
 import functools
 import os
+import shutil
+import subprocess
+import sys
+import tempfile
 import threading
 import time
 import weakref

-import fixtures
 from oslo.config import cfg

 from rally.openstack.common import fileutils
-from rally.openstack.common.gettextutils import _  # noqa
+from rally.openstack.common.gettextutils import _
 from rally.openstack.common import local
 from rally.openstack.common import log as logging

@@ -40,6 +41,7 @@ util_opts = [
     cfg.BoolOpt('disable_process_locking', default=False,
                 help='Whether to disable inter-process locks'),
     cfg.StrOpt('lock_path',
+               default=os.environ.get("RALLY_LOCK_PATH"),
                help=('Directory to use for lock files.'))
 ]
@@ -132,6 +134,7 @@ else:
     InterProcessLock = _PosixLock

 _semaphores = weakref.WeakValueDictionary()
+_semaphores_lock = threading.Lock()


 @contextlib.contextmanager
@@ -143,26 +146,23 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None):
     special location for external lock files to live. If nothing is set, then
     CONF.lock_path is used as a default.
     """
-    # NOTE(soren): If we ever go natively threaded, this will be racy.
-    #              See http://stackoverflow.com/questions/5390569/dyn
-    #              amically-allocating-and-destroying-mutexes
-    sem = _semaphores.get(name, threading.Semaphore())
-    if name not in _semaphores:
-        # this check is not racy - we're already holding ref locally
-        # so GC won't remove the item and there was no IO switch
-        # (only valid in greenthreads)
-        _semaphores[name] = sem
+    with _semaphores_lock:
+        try:
+            sem = _semaphores[name]
+        except KeyError:
+            sem = threading.Semaphore()
+            _semaphores[name] = sem

     with sem:
         LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
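A self-contained sketch of the semaphore cache that lock() now uses: the module-level lock makes the get-or-create step safe for native threads, and the WeakValueDictionary lets unused semaphores be garbage collected. The names mirror the code above, but the snippet is illustrative, not the module itself.

import threading
import weakref

_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()


def get_semaphore(name):
    # Get-or-create under a lock so two threads never race on the same name.
    with _semaphores_lock:
        try:
            sem = _semaphores[name]
        except KeyError:
            sem = threading.Semaphore()
            _semaphores[name] = sem
    return sem


assert get_semaphore("mylock") is get_semaphore("mylock")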
@@ -279,34 +279,25 @@ def synchronized_with_prefix(lock_file_prefix):
     return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)


-class LockFixture(fixtures.Fixture):
-    """External locking fixture.
-
-    This fixture is basically an alternative to the synchronized decorator with
-    the external flag so that tearDowns and addCleanups will be included in
-    the lock context for locking between tests. The fixture is recommended to
-    be the first line in a test method, like so::
-
-        def test_method(self):
-            self.useFixture(LockFixture)
-                ...
-
-    or the first line in setUp if all the test methods in the class are
-    required to be serialized. Something like::
-
-        class TestCase(testtools.testcase):
-            def setUp(self):
-                self.useFixture(LockFixture)
-                super(TestCase, self).setUp()
-                ...
-
-    This is because addCleanups are put on a LIFO queue that gets run after the
-    test method exits. (either by completing or raising an exception)
-    """
-    def __init__(self, name, lock_file_prefix=None):
-        self.mgr = lock(name, lock_file_prefix, True)
-
-    def setUp(self):
-        super(LockFixture, self).setUp()
-        self.addCleanup(self.mgr.__exit__, None, None, None)
-        self.mgr.__enter__()
+def main(argv):
+    """Create a dir for locks and pass it to command from arguments
+
+    If you run this:
+    python -m openstack.common.lockutils python setup.py testr <etc>
+
+    a temporary directory will be created for all your locks and passed to all
+    your tests in an environment variable. The temporary dir will be deleted
+    afterwards and the return value will be preserved.
+    """
+    lock_dir = tempfile.mkdtemp()
+    os.environ["RALLY_LOCK_PATH"] = lock_dir
+    try:
+        ret_val = subprocess.call(argv[1:])
+    finally:
+        shutil.rmtree(lock_dir, ignore_errors=True)
+    return ret_val
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv))
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
@@ -35,6 +33,7 @@ import logging
 import logging.config
 import logging.handlers
 import os
+import re
 import sys
 import traceback

@@ -42,7 +41,7 @@ from oslo.config import cfg
 import six
 from six import moves

-from rally.openstack.common.gettextutils import _  # noqa
+from rally.openstack.common.gettextutils import _
 from rally.openstack.common import importutils
 from rally.openstack.common import jsonutils
 from rally.openstack.common import local
@@ -50,6 +49,24 @@ from rally.openstack.common import local

 _DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

+_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
+
+# NOTE(ldbragst): Let's build a list of regex objects using the list of
+# _SANITIZE_KEYS we already have. This way, we only have to add the new key
+# to the list of _SANITIZE_KEYS and we can generate regular expressions
+# for XML and JSON automatically.
+_SANITIZE_PATTERNS = []
+_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
+                    r'(<%(key)s>).*?(</%(key)s>)',
+                    r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
+                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
+
+for key in _SANITIZE_KEYS:
+    for pattern in _FORMAT_PATTERNS:
+        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
+        _SANITIZE_PATTERNS.append(reg_ex)
+
+
 common_cli_opts = [
     cfg.BoolOpt('debug',
                 short='d',
@@ -64,11 +81,13 @@ common_cli_opts = [
 ]

 logging_cli_opts = [
-    cfg.StrOpt('log-config',
+    cfg.StrOpt('log-config-append',
                metavar='PATH',
-               help='If this option is specified, the logging configuration '
-                    'file specified is used and overrides any other logging '
-                    'options specified. Please see the Python logging module '
+               deprecated_name='log-config',
+               help='The name of logging configuration file. It does not '
+                    'disable existing loggers, but just appends specified '
+                    'logging configuration to any other existing logging '
+                    'options. Please see the Python logging module '
                     'documentation for details on logging configuration '
                     'files.'),
     cfg.StrOpt('log-format',
@@ -111,7 +130,7 @@ generic_log_opts = [
 log_opts = [
     cfg.StrOpt('logging_context_format_string',
                default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
-                       '%(name)s [%(request_id)s %(user)s %(tenant)s] '
+                       '%(name)s [%(request_id)s %(user_identity)s] '
                        '%(instance)s%(message)s',
                help='format string to use for log messages with context'),
     cfg.StrOpt('logging_default_format_string',
@@ -127,12 +146,13 @@ log_opts = [
                help='prefix each line of exception output with this format'),
     cfg.ListOpt('default_log_levels',
                 default=[
+                    'amqp=WARN',
                     'amqplib=WARN',
-                    'sqlalchemy=WARN',
                     'boto=WARN',
+                    'qpid=WARN',
+                    'sqlalchemy=WARN',
                     'suds=INFO',
-                    'keystone=INFO',
-                    'eventlet.wsgi.server=WARN'
+                    'iso8601=WARN',
                 ],
                 help='list of logger=LEVEL pairs'),
     cfg.BoolOpt('publish_errors',
@@ -211,6 +231,40 @@ def _get_log_file_path(binary=None):
     return None


+def mask_password(message, secret="***"):
+    """Replace password with 'secret' in message.
+
+    :param message: The string which includes security information.
+    :param secret: value with which to replace passwords.
+    :returns: The unicode value of message with the password fields masked.
+
+    For example:
+
+    >>> mask_password("'adminPass' : 'aaaaa'")
+    "'adminPass' : '***'"
+    >>> mask_password("'admin_pass' : 'aaaaa'")
+    "'admin_pass' : '***'"
+    >>> mask_password('"password" : "aaaaa"')
+    '"password" : "***"'
+    >>> mask_password("'original_password' : 'aaaaa'")
+    "'original_password' : '***'"
+    >>> mask_password("u'original_password' : u'aaaaa'")
+    "u'original_password' : u'***'"
+    """
+    message = six.text_type(message)
+
+    # NOTE(ldbragst): Check to see if anything in message contains any key
+    # specified in _SANITIZE_KEYS, if not then just return the message since
+    # we don't have to mask any passwords.
+    if not any(key in message for key in _SANITIZE_KEYS):
+        return message
+
+    secret = r'\g<1>' + secret + r'\g<2>'
+    for pattern in _SANITIZE_PATTERNS:
+        message = re.sub(pattern, secret, message)
+    return message
+
+
 class BaseLoggerAdapter(logging.LoggerAdapter):

     def audit(self, msg, *args, **kwargs):
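The substitution string built by mask_password above relies on re.sub group references: the two capture groups preserve the key and the surrounding quotes while only the secret between them is replaced. A minimal illustration with a single hypothetical pattern (not one of the generated _SANITIZE_PATTERNS):

import re

pattern = re.compile(r"('password'\s*:\s*')[^']*(')")
print(pattern.sub(r'\g<1>***\g<2>', "{'password' : 'aaaaa'}"))
# {'password' : '***'}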
@@ -278,10 +332,12 @@ class ContextAdapter(BaseLoggerAdapter):
         elif instance_uuid:
             instance_extra = (CONF.instance_uuid_format
                               % {'uuid': instance_uuid})
-        extra.update({'instance': instance_extra})
+        extra['instance'] = instance_extra

-        extra.update({"project": self.project})
-        extra.update({"version": self.version})
+        extra.setdefault('user_identity', kwargs.pop('user_identity', None))
+
+        extra['project'] = self.project
+        extra['version'] = self.version
         extra['extra'] = extra.copy()
         return msg, kwargs
@@ -295,7 +351,7 @@ class JSONFormatter(logging.Formatter):
     def formatException(self, ei, strip_newlines=True):
         lines = traceback.format_exception(*ei)
         if strip_newlines:
-            lines = [itertools.ifilter(
+            lines = [moves.filter(
                 lambda x: x,
                 line.rstrip().splitlines()) for line in lines]
             lines = list(itertools.chain(*lines))
@@ -333,10 +389,10 @@ class JSONFormatter(logging.Formatter):


 def _create_logging_excepthook(product_name):
-    def logging_excepthook(type, value, tb):
+    def logging_excepthook(exc_type, value, tb):
         extra = {}
         if CONF.verbose:
-            extra['exc_info'] = (type, value, tb)
+            extra['exc_info'] = (exc_type, value, tb)
         getLogger(product_name).critical(str(value), **extra)
     return logging_excepthook
@@ -354,17 +410,18 @@ class LogConfigError(Exception):
             err_msg=self.err_msg)


-def _load_log_config(log_config):
+def _load_log_config(log_config_append):
     try:
-        logging.config.fileConfig(log_config)
+        logging.config.fileConfig(log_config_append,
+                                  disable_existing_loggers=False)
     except moves.configparser.Error as exc:
-        raise LogConfigError(log_config, str(exc))
+        raise LogConfigError(log_config_append, str(exc))


 def setup(product_name):
     """Setup logging."""
-    if CONF.log_config:
-        _load_log_config(CONF.log_config)
+    if CONF.log_config_append:
+        _load_log_config(CONF.log_config_append)
     else:
         _setup_logging_from_conf()
     sys.excepthook = _create_logging_excepthook(product_name)
@@ -420,7 +477,7 @@ def _setup_logging_from_conf():
         streamlog = ColorHandler()
         log_root.addHandler(streamlog)

-    elif not CONF.log_file:
+    elif not logpath:
         # pass sys.stdout as a positional argument
         # python2.6 calls the argument strm, in 2.7 it's stream
         streamlog = logging.StreamHandler(sys.stdout)
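A runnable sketch of the append-style behavior that disable_existing_loggers=False enables in _load_log_config() above; the config file content here is a minimal illustrative example, not taken from the commit.

import logging
import logging.config
import tempfile

existing = logging.getLogger("demo.existing")

conf = b"""
[loggers]
keys = root

[handlers]
keys = console

[formatters]
keys =

[logger_root]
level = INFO
handlers = console

[handler_console]
class = StreamHandler
args = (sys.stdout,)
"""

with tempfile.NamedTemporaryFile(suffix=".ini") as f:
    f.write(conf)
    f.flush()
    # Pre-existing loggers keep working instead of being silently disabled.
    logging.config.fileConfig(f.name, disable_existing_loggers=False)

existing.info("still enabled after fileConfig()")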
rally/openstack/common/py3kcompat/__init__.py (new file, 16 lines)
@@ -0,0 +1,16 @@
+#
+# Copyright 2013 Canonical Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#

rally/openstack/common/py3kcompat/urlutils.py (new file, 65 lines)
@@ -0,0 +1,65 @@
+#
+# Copyright 2013 Canonical Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Python2/Python3 compatibility layer for OpenStack
+"""
+
+import six
+
+if six.PY3:
+    # python3
+    import urllib.error
+    import urllib.parse
+    import urllib.request
+
+    urlencode = urllib.parse.urlencode
+    urljoin = urllib.parse.urljoin
+    quote = urllib.parse.quote
+    parse_qsl = urllib.parse.parse_qsl
+    unquote = urllib.parse.unquote
+    unquote_plus = urllib.parse.unquote_plus
+    urlparse = urllib.parse.urlparse
+    urlsplit = urllib.parse.urlsplit
+    urlunsplit = urllib.parse.urlunsplit
+    SplitResult = urllib.parse.SplitResult
+
+    urlopen = urllib.request.urlopen
+    URLError = urllib.error.URLError
+    pathname2url = urllib.request.pathname2url
+else:
+    # python2
+    import urllib
+    import urllib2
+    import urlparse
+
+    urlencode = urllib.urlencode
+    quote = urllib.quote
+    unquote = urllib.unquote
+    unquote_plus = urllib.unquote_plus
+
+    parse = urlparse
+    parse_qsl = parse.parse_qsl
+    urljoin = parse.urljoin
+    urlparse = parse.urlparse
+    urlsplit = parse.urlsplit
+    urlunsplit = parse.urlunsplit
+    SplitResult = parse.SplitResult
+
+    urlopen = urllib2.urlopen
+    URLError = urllib2.URLError
+    pathname2url = urllib.pathname2url
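A short sketch of how the compatibility shim above is meant to be consumed: callers import one name that resolves to the right stdlib function on either interpreter. The selection is done inline here rather than through the new module, purely for illustration.

import six

if six.PY3:
    import urllib.parse
    urlparse = urllib.parse.urlparse
else:
    import urlparse as _urlparse
    urlparse = _urlparse.urlparse

pieces = urlparse("mysql://user:secret@localhost/test")
print(pieces.scheme, pieces.hostname, pieces.path)  # mysql localhost /test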
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -25,7 +23,7 @@ import unicodedata

 import six

-from rally.openstack.common.gettextutils import _  # noqa
+from rally.openstack.common.gettextutils import _


 # Used for looking up extensions of text
@@ -101,7 +99,7 @@ def safe_decode(text, incoming=None, errors='strict'):
                    values http://docs.python.org/2/library/codecs.html
     :returns: text or a unicode `incoming` encoded
                 representation of it.
-    :raises TypeError: If text is not an isntance of str
+    :raises TypeError: If text is not an instance of str
     """
     if not isinstance(text, six.string_types):
         raise TypeError("%s can't be decoded" % type(text))
@@ -144,7 +142,7 @@ def safe_encode(text, incoming=None,
                    values http://docs.python.org/2/library/codecs.html
     :returns: text or a bytestring `encoding` encoded
                 representation of it.
-    :raises TypeError: If text is not an isntance of str
+    :raises TypeError: If text is not an instance of str
     """
     if not isinstance(text, six.string_types):
         raise TypeError("%s can't be encoded" % type(text))
@@ -154,11 +152,17 @@ def safe_encode(text, incoming=None,
                             sys.getdefaultencoding())

     if isinstance(text, six.text_type):
-        return text.encode(encoding, errors)
+        if six.PY3:
+            return text.encode(encoding, errors).decode(incoming)
+        else:
+            return text.encode(encoding, errors)
     elif text and encoding != incoming:
         # Decode text before encoding it with `encoding`
         text = safe_decode(text, incoming, errors)
-        return text.encode(encoding, errors)
+        if six.PY3:
+            return text.encode(encoding, errors).decode(incoming)
+        else:
+            return text.encode(encoding, errors)

     return text
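An illustrative round trip for the new Python 3 branch of safe_encode() above: encoding and then decoding keeps the result a text string, which is what the Python 3 callers receive. The charsets used here are an assumption made for the example.

import six

text = u'caf\xe9'
if six.PY3:
    # Encode to the target charset, then decode so the caller gets text back.
    result = text.encode('utf-8', 'strict').decode('utf-8')
else:
    # Python 2 callers get the encoded bytestring directly.
    result = text.encode('utf-8', 'strict')
print(repr(result))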
@@ -1,19 +1,17 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010-2011 OpenStack Foundation
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
 #
 #      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.

 """Common utilities used in testing"""
@@ -22,6 +20,8 @@ import os
 import fixtures
 import testtools

+_TRUE_VALUES = ('True', 'true', '1', 'yes')
+

 class BaseTestCase(testtools.TestCase):
@@ -29,8 +29,9 @@ class BaseTestCase(testtools.TestCase):
         super(BaseTestCase, self).setUp()
         self._set_timeout()
         self._fake_output()
-        self.useFixture(fixtures.FakeLogger('rally.openstack.common'))
+        self.useFixture(fixtures.FakeLogger())
         self.useFixture(fixtures.NestedTempfile())
+        self.useFixture(fixtures.TempHomeDir())

     def _set_timeout(self):
         test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
@@ -43,11 +44,9 @@ class BaseTestCase(testtools.TestCase):
         self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

     def _fake_output(self):
-        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
-                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
+        if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
             stdout = self.useFixture(fixtures.StringStream('stdout')).stream
             self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
-        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
-                os.environ.get('OS_STDERR_CAPTURE') == '1'):
+        if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
             stderr = self.useFixture(fixtures.StringStream('stderr')).stream
             self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
@@ -50,9 +48,9 @@ def parse_isotime(timestr):
    try:
        return iso8601.parse_date(timestr)
    except iso8601.ParseError as e:
-        raise ValueError(unicode(e))
+        raise ValueError(six.text_type(e))
    except TypeError as e:
-        raise ValueError(unicode(e))
+        raise ValueError(six.text_type(e))


def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
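
The unicode() builtin does not exist on Python 3; six.text_type maps to unicode on Python 2 and str on Python 3, which is why the two raise statements change. A minimal, self-contained illustration (the error message below is made up):

    import six

    try:
        raise TypeError("expecting a string, not None")
    except TypeError as e:
        message = six.text_type(e)   # same code runs on Python 2 and 3

    print(message)  # -> expecting a string, not None
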
@@ -79,6 +77,9 @@ def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    if isinstance(before, six.string_types):
        before = parse_strtime(before).replace(tzinfo=None)
+    else:
+        before = before.replace(tzinfo=None)
+
    return utcnow() - before > datetime.timedelta(seconds=seconds)
@@ -86,6 +87,9 @@ def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    if isinstance(after, six.string_types):
        after = parse_strtime(after).replace(tzinfo=None)
+    else:
+        after = after.replace(tzinfo=None)
+
    return after - utcnow() > datetime.timedelta(seconds=seconds)
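
The new else branches matter because utcnow() returns a naive datetime and Python refuses to mix naive and timezone-aware datetimes in arithmetic; stripping tzinfo first makes aware inputs comparable. A small sketch of the failure mode (the _UTC class and the sample timestamp are invented for the example):

    import datetime


    class _UTC(datetime.tzinfo):
        """Minimal UTC tzinfo, defined only to build an aware datetime."""
        def utcoffset(self, dt):
            return datetime.timedelta(0)

        def tzname(self, dt):
            return 'UTC'

        def dst(self, dt):
            return datetime.timedelta(0)


    aware = datetime.datetime(2014, 1, 1, 12, 0, 0, tzinfo=_UTC())
    naive_now = datetime.datetime.utcnow()

    # naive_now - aware raises:
    #   TypeError: can't subtract offset-naive and offset-aware datetimes
    aware = aware.replace(tzinfo=None)   # what the else branch now does
    print(naive_now - aware > datetime.timedelta(seconds=60))
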
@@ -178,6 +182,15 @@ def delta_seconds(before, after):
    datetime objects (as a float, to microsecond resolution).
    """
    delta = after - before
+    return total_seconds(delta)
+
+
+def total_seconds(delta):
+    """Return the total seconds of datetime.timedelta object.
+
+    Compute total seconds of datetime.timedelta, datetime.timedelta
+    doesn't have method total_seconds in Python2.6, calculate it manually.
+    """
    try:
        return delta.total_seconds()
    except AttributeError:
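
delta_seconds() now delegates to the new total_seconds() helper because timedelta.total_seconds() only exists from Python 2.7 onwards. A sketch of the manual arithmetic the AttributeError path presumably performs (the body of that branch is not shown in the hunk, and the sample timedelta is made up):

    import datetime


    def total_seconds(delta):
        """Return the timedelta as a float number of seconds."""
        try:
            return delta.total_seconds()          # Python >= 2.7
        except AttributeError:
            # Python 2.6: convert days and microseconds by hand.
            return ((delta.days * 24 * 3600) + delta.seconds +
                    float(delta.microseconds) / (10 ** 6))


    print(total_seconds(datetime.timedelta(days=1, seconds=30,
                                           microseconds=500000)))
    # -> 86430.5
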
@@ -73,6 +73,7 @@ then
fi

BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'`
+find $TARGETDIR -type f -name "*.pyc" -delete
FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \
    -exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)
@@ -86,7 +87,13 @@ export EVENTLET_NO_GREENDNS=yes

OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
[ "$OS_VARS" ] && eval "unset \$OS_VARS"
-MODULEPATH=rally.openstack.common.config.generator
+DEFAULT_MODULEPATH=rally.openstack.common.config.generator
+MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
python -m $MODULEPATH $FILES > $OUTPUTFILE
+
+# Hook to allow projects to append custom config file snippets
+CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)
+for CONCAT_FILE in $CONCAT_FILES; do
+    cat $CONCAT_FILE >> $OUTPUTFILE
+done