diff --git a/mistral/db/api.py b/mistral/db/api.py index fddeae531..ac89c6822 100644 --- a/mistral/db/api.py +++ b/mistral/db/api.py @@ -23,7 +23,7 @@ _BACKEND_MAPPING = { 'sqlalchemy': 'mistral.db.sqlalchemy.api', } -IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING) +IMPL = db_api.DBAPI('sqlalchemy', backend_mapping=_BACKEND_MAPPING) LOG = logging.getLogger(__name__) diff --git a/mistral/db/sqlalchemy/api.py b/mistral/db/sqlalchemy/api.py index 34dca6967..9f0f31297 100644 --- a/mistral/db/sqlalchemy/api.py +++ b/mistral/db/sqlalchemy/api.py @@ -17,6 +17,8 @@ import sys import sqlalchemy as sa +from oslo.config import cfg + from mistral import utils from mistral import exceptions as exc from mistral.db.sqlalchemy import models as m @@ -27,11 +29,31 @@ from mistral.openstack.common.db import exception as db_exc LOG = logging.getLogger(__name__) -get_engine = db_session.get_engine -get_session = db_session.get_session +cfg.CONF.import_opt('connection', + 'mistral.openstack.common.db.options', + group='database') _DB_SESSION_THREAD_LOCAL_NAME = "db_sql_alchemy_session" +_facade = None + + +def get_facade(): + global _facade + if not _facade: + _facade = db_session.EngineFacade( + cfg.CONF.database.connection, sqlite_fk=True, autocommit=False, + **dict(cfg.CONF.database.iteritems())) + return _facade + + +def get_engine(): + return get_facade().get_engine() + + +def get_session(): + return get_facade().get_session() + def get_backend(): """The backend is this module itself.""" @@ -40,7 +62,7 @@ def get_backend(): def setup_db(): try: - engine = db_session.get_engine(sqlite_fk=True) + engine = get_engine() m.Trigger.metadata.create_all(engine) except sa.exc.OperationalError as e: LOG.exception("Database registration exception: %s", e) @@ -50,7 +72,7 @@ def setup_db(): def drop_db(): try: - engine = db_session.get_engine(sqlite_fk=True) + engine = get_engine() m.Trigger.metadata.drop_all(engine) except Exception as e: LOG.exception("Database shutdown exception: %s", e) @@ -83,7 +105,7 @@ def _get_or_create_thread_local_session(): if ses: return ses, False - ses = get_session(autocommit=False) + ses = get_session() _set_thread_local_session(ses) return ses, True @@ -139,7 +161,7 @@ def start_tx(): raise exc.DataAccessException("Database transaction has already been" " started.") - _set_thread_local_session(get_session(autocommit=False)) + _set_thread_local_session(get_session()) def commit_tx(): diff --git a/mistral/exceptions.py b/mistral/exceptions.py index 386eec6ab..cacd359d6 100644 --- a/mistral/exceptions.py +++ b/mistral/exceptions.py @@ -14,10 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mistral.openstack.common.exception as ex + +class Error(Exception): + def __init__(self, message=None): + super(Error, self).__init__(message) -class MistralException(ex.Error): +class MistralException(Error): """Base Exception for the project To correctly use this class, inherit from it and define diff --git a/mistral/openstack/common/__init__.py b/mistral/openstack/common/__init__.py index e69de29bb..d1223eaf7 100644 --- a/mistral/openstack/common/__init__.py +++ b/mistral/openstack/common/__init__.py @@ -0,0 +1,17 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
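The mistral/db/sqlalchemy/api.py hunks above replace the module-level oslo session helpers with a lazily created EngineFacade and pin the DBAPI backend name explicitly. A standalone sketch of that pattern, reduced to plain SQLAlchemy — the real code takes its URL and engine options from cfg.CONF.database rather than an argument:

```python
import sqlalchemy as sa
from sqlalchemy import orm

_facade = None


def get_facade(url='sqlite://'):
    # Unlocked check-then-set, as in the patch: safe under cooperative
    # green threads, racy under preemptive threading.
    global _facade
    if not _facade:
        engine = sa.create_engine(url)
        _facade = (engine, orm.sessionmaker(bind=engine))
    return _facade


def get_engine():
    return get_facade()[0]


def get_session():
    # Sessions are non-autocommit, mirroring autocommit=False above, so
    # callers such as start_tx()/commit_tx() own the transaction boundary.
    return get_facade()[1]()
```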
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import six + + +six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox')) diff --git a/mistral/openstack/common/apiclient/__init__.py b/mistral/openstack/common/apiclient/__init__.py index f3d0cdefd..e69de29bb 100644 --- a/mistral/openstack/common/apiclient/__init__.py +++ b/mistral/openstack/common/apiclient/__init__.py @@ -1,14 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/mistral/openstack/common/apiclient/auth.py b/mistral/openstack/common/apiclient/auth.py index 58fc6b6b8..14875e474 100644 --- a/mistral/openstack/common/apiclient/auth.py +++ b/mistral/openstack/common/apiclient/auth.py @@ -19,7 +19,6 @@ import abc import argparse -import logging import os import six @@ -28,9 +27,6 @@ from stevedore import extension from mistral.openstack.common.apiclient import exceptions -logger = logging.getLogger(__name__) - - _discovered_plugins = {} @@ -58,7 +54,7 @@ def load_auth_system_opts(parser): """ group = parser.add_argument_group("Common auth options") BaseAuthPlugin.add_common_opts(group) - for name, auth_plugin in _discovered_plugins.iteritems(): + for name, auth_plugin in six.iteritems(_discovered_plugins): group = parser.add_argument_group( "Auth-system '%s' options" % name, conflict_handler="resolve") @@ -80,7 +76,7 @@ def load_plugin_from_args(args): alphabetical order. :type args: argparse.Namespace - :raises: AuthorizationFailure + :raises: AuthPluginOptionsMissing """ auth_system = args.os_auth_system if auth_system: @@ -89,7 +85,7 @@ def load_plugin_from_args(args): plugin.sufficient_options() return plugin - for plugin_auth_system in sorted(_discovered_plugins.iterkeys()): + for plugin_auth_system in sorted(six.iterkeys(_discovered_plugins)): plugin_class = _discovered_plugins[plugin_auth_system] plugin = plugin_class() plugin.parse_opts(args) @@ -217,8 +213,8 @@ class BaseAuthPlugin(object): :type service_type: string :param endpoint_type: Type of endpoint. Possible values: public or publicURL, - internal or internalURL, - admin or adminURL + internal or internalURL, + admin or adminURL :type endpoint_type: string :returns: tuple of token and endpoint strings :raises: EndpointException diff --git a/mistral/openstack/common/apiclient/base.py b/mistral/openstack/common/apiclient/base.py index 61551fdff..f1efea878 100644 --- a/mistral/openstack/common/apiclient/base.py +++ b/mistral/openstack/common/apiclient/base.py @@ -24,11 +24,13 @@ Base utilities to build API operation managers and objects on top of. 
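The auth.py hunks above are part of the Python 3 groundwork: dict.iteritems() and dict.iterkeys() no longer exist there, while the six helpers dispatch correctly on both interpreters. A runnable illustration:

```python
from __future__ import print_function

import six

plugins = {'keystone': object, 'noauth': object}

# six.iteritems() calls dict.iteritems() on Python 2 and dict.items() on
# Python 3, which is the whole behavioural content of these hunks.
for name, cls in six.iteritems(plugins):
    print(name, cls)

# sorted() still provides the deterministic ordering the old
# sorted(d.iterkeys()) call relied on.
for name in sorted(six.iterkeys(plugins)):
    print(name)
```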
# pylint: disable=E1102 import abc -import urllib +import copy import six +from six.moves.urllib import parse from mistral.openstack.common.apiclient import exceptions +from mistral.openstack.common.gettextutils import _ from mistral.openstack.common import strutils @@ -73,8 +75,8 @@ class HookableMixin(object): :param cls: class that registers hooks :param hook_type: hook type, e.g., '__pre_parse_args__' - :param **args: args to be passed to every hook function - :param **kwargs: kwargs to be passed to every hook function + :param args: args to be passed to every hook function + :param kwargs: kwargs to be passed to every hook function """ hook_funcs = cls._hooks_map.get(hook_type) or [] for hook_func in hook_funcs: @@ -218,7 +220,10 @@ class ManagerWithFind(BaseManager): matches = self.findall(**kwargs) num_matches = len(matches) if num_matches == 0: - msg = "No %s matching %s." % (self.resource_class.__name__, kwargs) + msg = _("No %(name)s matching %(args)s.") % { + 'name': self.resource_class.__name__, + 'args': kwargs + } raise exceptions.NotFound(msg) elif num_matches > 1: raise exceptions.NoUniqueMatch() @@ -291,7 +296,7 @@ class CrudManager(BaseManager): def _filter_kwargs(self, kwargs): """Drop null values and handle ids.""" - for key, ref in kwargs.copy().iteritems(): + for key, ref in six.iteritems(kwargs.copy()): if ref is None: kwargs.pop(key) else: @@ -327,7 +332,7 @@ class CrudManager(BaseManager): return self._list( '%(base_url)s%(query)s' % { 'base_url': self.build_url(base_url=base_url, **kwargs), - 'query': '?%s' % urllib.urlencode(kwargs) if kwargs else '', + 'query': '?%s' % parse.urlencode(kwargs) if kwargs else '', }, self.collection_key) @@ -366,13 +371,16 @@ class CrudManager(BaseManager): rl = self._list( '%(base_url)s%(query)s' % { 'base_url': self.build_url(base_url=base_url, **kwargs), - 'query': '?%s' % urllib.urlencode(kwargs) if kwargs else '', + 'query': '?%s' % parse.urlencode(kwargs) if kwargs else '', }, self.collection_key) num = len(rl) if num == 0: - msg = "No %s matching %s." % (self.resource_class.__name__, kwargs) + msg = _("No %(name)s matching %(args)s.") % { + 'name': self.resource_class.__name__, + 'args': kwargs + } raise exceptions.NotFound(404, msg) elif num > 1: raise exceptions.NoUniqueMatch @@ -445,7 +453,7 @@ class Resource(object): return None def _add_details(self, info): - for (k, v) in info.iteritems(): + for (k, v) in six.iteritems(info): try: setattr(self, k, v) self._info[k] = v @@ -465,6 +473,11 @@ class Resource(object): return self.__dict__[k] def get(self): + """Support for lazy loading details. + + Some clients, such as novaclient have the option to lazy load the + details, details which can be loaded with this function. + """ # set_loaded() first ... so if we have to bail, we know we tried. 
self.set_loaded(True) if not hasattr(self.manager, 'get'): @@ -489,3 +502,6 @@ class Resource(object): def set_loaded(self, val): self._loaded = val + + def to_dict(self): + return copy.deepcopy(self._info) diff --git a/mistral/openstack/common/apiclient/client.py b/mistral/openstack/common/apiclient/client.py index 1e9808790..f47de237a 100644 --- a/mistral/openstack/common/apiclient/client.py +++ b/mistral/openstack/common/apiclient/client.py @@ -36,6 +36,7 @@ except ImportError: import requests from mistral.openstack.common.apiclient import exceptions +from mistral.openstack.common.gettextutils import _ from mistral.openstack.common import importutils @@ -46,6 +47,7 @@ class HTTPClient(object): """This client handles sending HTTP requests to OpenStack servers. Features: + - share authentication information between several clients to different services (e.g., for compute and image clients); - reissue authentication request for expired tokens; @@ -151,7 +153,7 @@ class HTTPClient(object): :param method: method of HTTP request :param url: URL of HTTP request :param kwargs: any other parameter that can be passed to -' requests.Session.request (such as `headers`) or `json` + requests.Session.request (such as `headers`) or `json` that will be encoded as JSON and used as `data` argument """ kwargs.setdefault("headers", kwargs.get("headers", {})) @@ -206,7 +208,7 @@ class HTTPClient(object): :param method: method of HTTP request :param url: URL of HTTP request :param kwargs: any other parameter that can be passed to -' `HTTPClient.request` + `HTTPClient.request` """ filter_args = { @@ -228,7 +230,7 @@ class HTTPClient(object): **filter_args) if not (token and endpoint): raise exceptions.AuthorizationFailure( - "Cannot find endpoint or token for request") + _("Cannot find endpoint or token for request")) old_token_endpoint = (token, endpoint) kwargs.setdefault("headers", {})["X-Auth-Token"] = token @@ -351,8 +353,12 @@ class BaseClient(object): try: client_path = version_map[str(version)] except (KeyError, ValueError): - msg = "Invalid %s client version '%s'. must be one of: %s" % ( - (api_name, version, ', '.join(version_map.keys()))) + msg = _("Invalid %(api_name)s client version '%(version)s'. " + "Must be one of: %(version_map)s") % { + 'api_name': api_name, + 'version': version, + 'version_map': ', '.join(version_map.keys()) + } raise exceptions.UnsupportedVersion(msg) return importutils.import_class(client_path) diff --git a/mistral/openstack/common/apiclient/exceptions.py b/mistral/openstack/common/apiclient/exceptions.py index b364d60dc..c6482ba45 100644 --- a/mistral/openstack/common/apiclient/exceptions.py +++ b/mistral/openstack/common/apiclient/exceptions.py @@ -25,6 +25,8 @@ import sys import six +from mistral.openstack.common.gettextutils import _ + class ClientException(Exception): """The base exception class for all exceptions this library raises. 
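The new Resource.get() docstring and to_dict() above codify the lazy-loading contract: a missing attribute falls through __getattr__ to manager.get(), and to_dict() returns an independent deep copy. A self-contained toy (not the apiclient classes themselves) demonstrating that contract:

```python
import copy


class ToyManager(object):
    def get(self, res):
        # Stands in for a client round-trip that returns full details.
        res._add_details({'progress': 100})


class ToyResource(object):
    def __init__(self, manager, info):
        self.manager = manager
        self._info = {}
        self._loaded = False
        self._add_details(info)

    def _add_details(self, info):
        for k, v in info.items():
            setattr(self, k, v)
            self._info[k] = v

    def __getattr__(self, k):
        # Only reached for attributes that were never set.
        if not self._loaded:
            self._loaded = True
            self.manager.get(self)
            return getattr(self, k)
        raise AttributeError(k)

    def to_dict(self):
        return copy.deepcopy(self._info)


r = ToyResource(ToyManager(), {'id': '42'})
assert r.progress == 100                      # lazy fetch happened here
assert r.to_dict() == {'id': '42', 'progress': 100}
```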
@@ -36,7 +38,7 @@ class MissingArgs(ClientException): """Supplied arguments are not sufficient for calling a function.""" def __init__(self, missing): self.missing = missing - msg = "Missing argument(s): %s" % ", ".join(missing) + msg = _("Missing arguments: %s") % ", ".join(missing) super(MissingArgs, self).__init__(msg) @@ -60,11 +62,16 @@ class AuthorizationFailure(ClientException): pass +class ConnectionRefused(ClientException): + """Cannot connect to API service.""" + pass + + class AuthPluginOptionsMissing(AuthorizationFailure): """Auth plugin misses some options.""" def __init__(self, opt_names): super(AuthPluginOptionsMissing, self).__init__( - "Authentication failed. Missing options: %s" % + _("Authentication failed. Missing options: %s") % ", ".join(opt_names)) self.opt_names = opt_names @@ -73,7 +80,7 @@ class AuthSystemNotFound(AuthorizationFailure): """User has specified a AuthSystem that is not installed.""" def __init__(self, auth_system): super(AuthSystemNotFound, self).__init__( - "AuthSystemNotFound: %s" % repr(auth_system)) + _("AuthSystemNotFound: %s") % repr(auth_system)) self.auth_system = auth_system @@ -96,7 +103,7 @@ class AmbiguousEndpoints(EndpointException): """Found more than one matching endpoint in Service Catalog.""" def __init__(self, endpoints=None): super(AmbiguousEndpoints, self).__init__( - "AmbiguousEndpoints: %s" % repr(endpoints)) + _("AmbiguousEndpoints: %s") % repr(endpoints)) self.endpoints = endpoints @@ -104,7 +111,7 @@ class HttpError(ClientException): """The base exception class for all HTTP exceptions. """ http_status = 0 - message = "HTTP Error" + message = _("HTTP Error") def __init__(self, message=None, details=None, response=None, request_id=None, @@ -122,12 +129,17 @@ class HttpError(ClientException): super(HttpError, self).__init__(formatted_string) +class HTTPRedirection(HttpError): + """HTTP Redirection.""" + message = _("HTTP Redirection") + + class HTTPClientError(HttpError): """Client-side HTTP error. Exception for cases in which the client seems to have erred. """ - message = "HTTP Client Error" + message = _("HTTP Client Error") class HttpServerError(HttpError): @@ -136,7 +148,17 @@ class HttpServerError(HttpError): Exception for cases in which the server is aware that it has erred or is incapable of performing the request. """ - message = "HTTP Server Error" + message = _("HTTP Server Error") + + +class MultipleChoices(HTTPRedirection): + """HTTP 300 - Multiple Choices. + + Indicates multiple options for the resource that the client may follow. + """ + + http_status = 300 + message = _("Multiple Choices") class BadRequest(HTTPClientError): @@ -145,7 +167,7 @@ class BadRequest(HTTPClientError): The request cannot be fulfilled due to bad syntax. """ http_status = 400 - message = "Bad Request" + message = _("Bad Request") class Unauthorized(HTTPClientError): @@ -155,7 +177,7 @@ class Unauthorized(HTTPClientError): is required and has failed or has not yet been provided. """ http_status = 401 - message = "Unauthorized" + message = _("Unauthorized") class PaymentRequired(HTTPClientError): @@ -164,7 +186,7 @@ class PaymentRequired(HTTPClientError): Reserved for future use. """ http_status = 402 - message = "Payment Required" + message = _("Payment Required") class Forbidden(HTTPClientError): @@ -174,7 +196,7 @@ class Forbidden(HTTPClientError): to it. """ http_status = 403 - message = "Forbidden" + message = _("Forbidden") class NotFound(HTTPClientError): @@ -184,7 +206,7 @@ class NotFound(HTTPClientError): in the future. 
""" http_status = 404 - message = "Not Found" + message = _("Not Found") class MethodNotAllowed(HTTPClientError): @@ -194,7 +216,7 @@ class MethodNotAllowed(HTTPClientError): by that resource. """ http_status = 405 - message = "Method Not Allowed" + message = _("Method Not Allowed") class NotAcceptable(HTTPClientError): @@ -204,7 +226,7 @@ class NotAcceptable(HTTPClientError): acceptable according to the Accept headers sent in the request. """ http_status = 406 - message = "Not Acceptable" + message = _("Not Acceptable") class ProxyAuthenticationRequired(HTTPClientError): @@ -213,7 +235,7 @@ class ProxyAuthenticationRequired(HTTPClientError): The client must first authenticate itself with the proxy. """ http_status = 407 - message = "Proxy Authentication Required" + message = _("Proxy Authentication Required") class RequestTimeout(HTTPClientError): @@ -222,7 +244,7 @@ class RequestTimeout(HTTPClientError): The server timed out waiting for the request. """ http_status = 408 - message = "Request Timeout" + message = _("Request Timeout") class Conflict(HTTPClientError): @@ -232,7 +254,7 @@ class Conflict(HTTPClientError): in the request, such as an edit conflict. """ http_status = 409 - message = "Conflict" + message = _("Conflict") class Gone(HTTPClientError): @@ -242,7 +264,7 @@ class Gone(HTTPClientError): not be available again. """ http_status = 410 - message = "Gone" + message = _("Gone") class LengthRequired(HTTPClientError): @@ -252,7 +274,7 @@ class LengthRequired(HTTPClientError): required by the requested resource. """ http_status = 411 - message = "Length Required" + message = _("Length Required") class PreconditionFailed(HTTPClientError): @@ -262,7 +284,7 @@ class PreconditionFailed(HTTPClientError): put on the request. """ http_status = 412 - message = "Precondition Failed" + message = _("Precondition Failed") class RequestEntityTooLarge(HTTPClientError): @@ -271,7 +293,7 @@ class RequestEntityTooLarge(HTTPClientError): The request is larger than the server is willing or able to process. """ http_status = 413 - message = "Request Entity Too Large" + message = _("Request Entity Too Large") def __init__(self, *args, **kwargs): try: @@ -288,7 +310,7 @@ class RequestUriTooLong(HTTPClientError): The URI provided was too long for the server to process. """ http_status = 414 - message = "Request-URI Too Long" + message = _("Request-URI Too Long") class UnsupportedMediaType(HTTPClientError): @@ -298,7 +320,7 @@ class UnsupportedMediaType(HTTPClientError): not support. """ http_status = 415 - message = "Unsupported Media Type" + message = _("Unsupported Media Type") class RequestedRangeNotSatisfiable(HTTPClientError): @@ -308,7 +330,7 @@ class RequestedRangeNotSatisfiable(HTTPClientError): supply that portion. """ http_status = 416 - message = "Requested Range Not Satisfiable" + message = _("Requested Range Not Satisfiable") class ExpectationFailed(HTTPClientError): @@ -317,7 +339,7 @@ class ExpectationFailed(HTTPClientError): The server cannot meet the requirements of the Expect request-header field. """ http_status = 417 - message = "Expectation Failed" + message = _("Expectation Failed") class UnprocessableEntity(HTTPClientError): @@ -327,7 +349,7 @@ class UnprocessableEntity(HTTPClientError): errors. """ http_status = 422 - message = "Unprocessable Entity" + message = _("Unprocessable Entity") class InternalServerError(HttpServerError): @@ -336,7 +358,7 @@ class InternalServerError(HttpServerError): A generic error message, given when no more specific message is suitable. 
""" http_status = 500 - message = "Internal Server Error" + message = _("Internal Server Error") # NotImplemented is a python keyword. @@ -347,7 +369,7 @@ class HttpNotImplemented(HttpServerError): the ability to fulfill the request. """ http_status = 501 - message = "Not Implemented" + message = _("Not Implemented") class BadGateway(HttpServerError): @@ -357,7 +379,7 @@ class BadGateway(HttpServerError): response from the upstream server. """ http_status = 502 - message = "Bad Gateway" + message = _("Bad Gateway") class ServiceUnavailable(HttpServerError): @@ -366,7 +388,7 @@ class ServiceUnavailable(HttpServerError): The server is currently unavailable. """ http_status = 503 - message = "Service Unavailable" + message = _("Service Unavailable") class GatewayTimeout(HttpServerError): @@ -376,7 +398,7 @@ class GatewayTimeout(HttpServerError): response from the upstream server. """ http_status = 504 - message = "Gateway Timeout" + message = _("Gateway Timeout") class HttpVersionNotSupported(HttpServerError): @@ -385,7 +407,7 @@ class HttpVersionNotSupported(HttpServerError): The server does not support the HTTP protocol version used in the request. """ http_status = 505 - message = "HTTP Version Not Supported" + message = _("HTTP Version Not Supported") # _code_map contains all the classes that have http_status attribute. @@ -403,12 +425,17 @@ def from_response(response, method, url): :param method: HTTP method used for request :param url: URL used for request """ + + req_id = response.headers.get("x-openstack-request-id") + #NOTE(hdd) true for older versions of nova and cinder + if not req_id: + req_id = response.headers.get("x-compute-request-id") kwargs = { "http_status": response.status_code, "response": response, "method": method, "url": url, - "request_id": response.headers.get("x-compute-request-id"), + "request_id": req_id, } if "retry-after" in response.headers: kwargs["retry_after"] = response.headers["retry-after"] @@ -420,10 +447,10 @@ def from_response(response, method, url): except ValueError: pass else: - if hasattr(body, "keys"): - error = body[body.keys()[0]] - kwargs["message"] = error.get("message", None) - kwargs["details"] = error.get("details", None) + if isinstance(body, dict): + error = list(body.values())[0] + kwargs["message"] = error.get("message") + kwargs["details"] = error.get("details") elif content_type.startswith("text/"): kwargs["details"] = response.text diff --git a/mistral/openstack/common/apiclient/fake_client.py b/mistral/openstack/common/apiclient/fake_client.py index 7164c1803..2d878fe4d 100644 --- a/mistral/openstack/common/apiclient/fake_client.py +++ b/mistral/openstack/common/apiclient/fake_client.py @@ -27,9 +27,10 @@ places where actual behavior differs from the spec. 
import json import requests +import six +from six.moves.urllib import parse from mistral.openstack.common.apiclient import client -from mistral.openstack.common.py3kcompat import urlutils def assert_has_keys(dct, required=[], optional=[]): @@ -61,6 +62,8 @@ class TestResponse(requests.Response): else: self._content = text default_headers = {} + if six.PY3 and isinstance(self._content, six.string_types): + self._content = self._content.encode('utf-8', 'strict') self.headers = data.get('headers') or default_headers else: self.status_code = data @@ -144,7 +147,7 @@ class FakeHTTPClient(client.HTTPClient): "text": fixture[1]}) # Call the method - args = urlutils.parse_qsl(urlutils.urlparse(url)[4]) + args = parse.parse_qsl(parse.urlparse(url)[4]) kwargs.update(args) munged_url = url.rsplit('?', 1)[0] munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_') diff --git a/mistral/openstack/common/cliutils.py b/mistral/openstack/common/cliutils.py index b69f17516..3665db45e 100644 --- a/mistral/openstack/common/cliutils.py +++ b/mistral/openstack/common/cliutils.py @@ -16,6 +16,8 @@ # W0621: Redefining name %s from outer scope # pylint: disable=W0603,W0621 +from __future__ import print_function + import getpass import inspect import os @@ -27,7 +29,9 @@ import six from six import moves from mistral.openstack.common.apiclient import exceptions +from mistral.openstack.common.gettextutils import _ from mistral.openstack.common import strutils +from mistral.openstack.common import uuidutils def validate_args(fn, *args, **kwargs): @@ -52,7 +56,7 @@ def validate_args(fn, *args, **kwargs): required_args = argspec.args[:len(argspec.args) - num_defaults] def isbound(method): - return getattr(method, 'im_self', None) is not None + return getattr(method, '__self__', None) is not None if isbound(fn): required_args.pop(0) @@ -84,7 +88,7 @@ def env(*args, **kwargs): If all are empty, defaults to '' or keyword arg `default`. """ for arg in args: - value = os.environ.get(arg, None) + value = os.environ.get(arg) if value: return value return kwargs.get('default', '') @@ -141,9 +145,9 @@ def print_list(objs, fields, formatters=None, sortby_index=0, formatters = formatters or {} mixed_case_fields = mixed_case_fields or [] if sortby_index is None: - sortby = None + kwargs = {} else: - sortby = fields[sortby_index] + kwargs = {'sortby': fields[sortby_index]} pt = prettytable.PrettyTable(fields, caching=False) pt.align = 'l' @@ -161,7 +165,7 @@ def print_list(objs, fields, formatters=None, sortby_index=0, row.append(data) pt.add_row(row) - print(strutils.safe_encode(pt.get_string(sortby=sortby))) + print(strutils.safe_encode(pt.get_string(**kwargs))) def print_dict(dct, dict_property="Property", wrap=0): @@ -173,12 +177,12 @@ def print_dict(dct, dict_property="Property", wrap=0): """ pt = prettytable.PrettyTable([dict_property, 'Value'], caching=False) pt.align = 'l' - for k, v in dct.iteritems(): + for k, v in six.iteritems(dct): # convert dict to str to check length if isinstance(v, dict): - v = str(v) + v = six.text_type(v) if wrap > 0: - v = textwrap.fill(str(v), wrap) + v = textwrap.fill(six.text_type(v), wrap) # if value has a newline, add in multiple rows # e.g. 
fault with stacktrace if v and isinstance(v, six.string_types) and r'\n' in v: @@ -199,7 +203,7 @@ def get_password(max_password_prompts=3): if hasattr(sys.stdin, "isatty") and sys.stdin.isatty(): # Check for Ctrl-D try: - for _ in moves.range(max_password_prompts): + for __ in moves.range(max_password_prompts): pw1 = getpass.getpass("OS Password: ") if verify: pw2 = getpass.getpass("Please verify: ") @@ -211,3 +215,98 @@ def get_password(max_password_prompts=3): except EOFError: pass return pw + + +def find_resource(manager, name_or_id, **find_args): + """Look for resource in a given manager. + + Used as a helper for the _find_* methods. + Example: + + def _find_hypervisor(cs, hypervisor): + #Get a hypervisor by name or ID. + return cliutils.find_resource(cs.hypervisors, hypervisor) + """ + # first try to get entity as integer id + try: + return manager.get(int(name_or_id)) + except (TypeError, ValueError, exceptions.NotFound): + pass + + # now try to get entity as uuid + try: + if six.PY2: + tmp_id = strutils.safe_encode(name_or_id) + else: + tmp_id = strutils.safe_decode(name_or_id) + + if uuidutils.is_uuid_like(tmp_id): + return manager.get(tmp_id) + except (TypeError, ValueError, exceptions.NotFound): + pass + + # for str id which is not uuid + if getattr(manager, 'is_alphanum_id_allowed', False): + try: + return manager.get(name_or_id) + except exceptions.NotFound: + pass + + try: + try: + return manager.find(human_id=name_or_id, **find_args) + except exceptions.NotFound: + pass + + # finally try to find entity by name + try: + resource = getattr(manager, 'resource_class', None) + name_attr = resource.NAME_ATTR if resource else 'name' + kwargs = {name_attr: name_or_id} + kwargs.update(find_args) + return manager.find(**kwargs) + except exceptions.NotFound: + msg = _("No %(name)s with a name or " + "ID of '%(name_or_id)s' exists.") % \ + { + "name": manager.resource_class.__name__.lower(), + "name_or_id": name_or_id + } + raise exceptions.CommandError(msg) + except exceptions.NoUniqueMatch: + msg = _("Multiple %(name)s matches found for " + "'%(name_or_id)s', use an ID to be more specific.") % \ + { + "name": manager.resource_class.__name__.lower(), + "name_or_id": name_or_id + } + raise exceptions.CommandError(msg) + + +def service_type(stype): + """Adds 'service_type' attribute to decorated function. + + Usage: + @service_type('volume') + def mymethod(f): + ... + """ + def inner(f): + f.service_type = stype + return f + return inner + + +def get_service_type(f): + """Retrieves service type from function.""" + return getattr(f, 'service_type', None) + + +def pretty_choice_list(l): + return ', '.join("'%s'" % i for i in l) + + +def exit(msg=''): + if msg: + print (msg, file=sys.stderr) + sys.exit(1) diff --git a/mistral/openstack/common/config/generator.py b/mistral/openstack/common/config/generator.py index 4c11dcebf..3f641edf8 100644 --- a/mistral/openstack/common/config/generator.py +++ b/mistral/openstack/common/config/generator.py @@ -1,4 +1,5 @@ # Copyright 2012 SINA Corporation +# Copyright 2014 Cisco Systems, Inc. # All Rights Reserved. 
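The new cliutils.find_resource() above tries lookups in a fixed order — integer id, UUID, plain string id (when the manager allows it), human_id, then name — and raises CommandError on no match or an ambiguous one. Typical use, following the docstring's own example (cs stands for any novaclient-style client):

```python
from mistral.openstack.common import cliutils


def _find_hypervisor(cs, hypervisor):
    """Get a hypervisor by name or ID."""
    return cliutils.find_resource(cs.hypervisors, hypervisor)
```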
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -18,6 +19,7 @@ from __future__ import print_function +import argparse import imp import os import re @@ -27,6 +29,7 @@ import textwrap from oslo.config import cfg import six +import stevedore.named from mistral.openstack.common import gettextutils from mistral.openstack.common import importutils @@ -38,6 +41,7 @@ BOOLOPT = "BoolOpt" INTOPT = "IntOpt" FLOATOPT = "FloatOpt" LISTOPT = "ListOpt" +DICTOPT = "DictOpt" MULTISTROPT = "MultiStrOpt" OPT_TYPES = { @@ -46,11 +50,12 @@ OPT_TYPES = { INTOPT: 'integer value', FLOATOPT: 'floating point value', LISTOPT: 'list value', + DICTOPT: 'dict value', MULTISTROPT: 'multi valued', } OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT, - FLOATOPT, LISTOPT, + FLOATOPT, LISTOPT, DICTOPT, MULTISTROPT])) PY_EXT = ".py" @@ -59,34 +64,60 @@ BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), WORDWRAP_WIDTH = 60 -def generate(srcfiles): +def raise_extension_exception(extmanager, ep, err): + raise + + +def generate(argv): + parser = argparse.ArgumentParser( + description='generate sample configuration file', + ) + parser.add_argument('-m', dest='modules', action='append') + parser.add_argument('-l', dest='libraries', action='append') + parser.add_argument('srcfiles', nargs='*') + parsed_args = parser.parse_args(argv) + mods_by_pkg = dict() - for filepath in srcfiles: + for filepath in parsed_args.srcfiles: pkg_name = filepath.split(os.sep)[1] mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]), os.path.basename(filepath).split('.')[0]]) mods_by_pkg.setdefault(pkg_name, list()).append(mod_str) # NOTE(lzyeval): place top level modules before packages - pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys()) - pkg_names.sort() - ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys()) - ext_names.sort() + pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT)) + ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names) pkg_names.extend(ext_names) # opts_by_group is a mapping of group name to an options list # The options list is a list of (module, options) tuples opts_by_group = {'DEFAULT': []} - extra_modules = os.getenv("MISTRAL_CONFIG_GENERATOR_EXTRA_MODULES", "") - if extra_modules: - for module_name in extra_modules.split(','): - module_name = module_name.strip() + if parsed_args.modules: + for module_name in parsed_args.modules: module = _import_module(module_name) if module: for group, opts in _list_opts(module): opts_by_group.setdefault(group, []).append((module_name, opts)) + # Look for entry points defined in libraries (or applications) for + # option discovery, and include their return values in the output. + # + # Each entry point should be a function returning an iterable + # of pairs with the group name (or None for the default group) + # and the list of Opt instances for that group. 
+ if parsed_args.libraries: + loader = stevedore.named.NamedExtensionManager( + 'oslo.config.opts', + names=list(set(parsed_args.libraries)), + invoke_on_load=False, + on_load_failure_callback=raise_extension_exception + ) + for ext in loader: + for group, opts in ext.plugin(): + opt_list = opts_by_group.setdefault(group or 'DEFAULT', []) + opt_list.append((ext.name, opts)) + for pkg_name in pkg_names: mods = mods_by_pkg.get(pkg_name) mods.sort() @@ -102,8 +133,8 @@ def generate(srcfiles): opts_by_group.setdefault(group, []).append((mod_str, opts)) print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', [])) - for group, opts in opts_by_group.items(): - print_group_opts(group, opts) + for group in sorted(opts_by_group.keys()): + print_group_opts(group, opts_by_group[group]) def _import_module(mod_str): @@ -120,8 +151,10 @@ def _import_module(mod_str): def _is_in_group(opt, group): "Check if opt is in group." - for key, value in group._opts.items(): - if value['opt'] == opt: + for value in group._opts.values(): + # NOTE(llu): Temporary workaround for bug #1262148, wait until + # newly released oslo.config support '==' operator. + if not(value['opt'] != opt): return True return False @@ -132,7 +165,7 @@ def _guess_groups(opt, mod_obj): return 'DEFAULT' # what other groups is it in? - for key, value in cfg.CONF.items(): + for value in cfg.CONF.values(): if isinstance(value, cfg.CONF.GroupAttr): if _is_in_group(opt, value._group): return value._group.name @@ -190,6 +223,8 @@ def _get_my_ip(): def _sanitize_default(name, value): """Set up a reasonably sensible default for pybasedir, my_ip and host.""" + hostname = socket.gethostname() + fqdn = socket.getfqdn() if value.startswith(sys.prefix): # NOTE(jd) Don't use os.path.join, because it is likely to think the # second part is an absolute pathname and therefore drop the first @@ -201,8 +236,13 @@ def _sanitize_default(name, value): return value.replace(BASEDIR, '') elif value == _get_my_ip(): return '10.0.0.1' - elif value == socket.gethostname() and 'host' in name: - return 'mistral' + elif value in (hostname, fqdn): + if 'host' in name: + return 'mistral' + elif value.endswith(hostname): + return value.replace(hostname, 'mistral') + elif value.endswith(fqdn): + return value.replace(fqdn, 'mistral') elif value.strip() != value: return '"%s"' % value return value @@ -219,7 +259,8 @@ def _print_opt(opt): except (ValueError, AttributeError) as err: sys.stderr.write("%s\n" % str(err)) sys.exit(1) - opt_help += ' (' + OPT_TYPES[opt_type] + ')' + opt_help = u'%s (%s)' % (opt_help, + OPT_TYPES[opt_type]) print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH))) if opt.deprecated_opts: for deprecated_opt in opt.deprecated_opts: @@ -249,6 +290,11 @@ def _print_opt(opt): elif opt_type == LISTOPT: assert(isinstance(opt_default, list)) print('#%s=%s' % (opt_name, ','.join(opt_default))) + elif opt_type == DICTOPT: + assert(isinstance(opt_default, dict)) + opt_default_strlist = [str(key) + ':' + str(value) + for (key, value) in opt_default.items()] + print('#%s=%s' % (opt_name, ','.join(opt_default_strlist))) elif opt_type == MULTISTROPT: assert(isinstance(opt_default, list)) if not opt_default: diff --git a/mistral/openstack/common/context.py b/mistral/openstack/common/context.py new file mode 100644 index 000000000..3eeb445e4 --- /dev/null +++ b/mistral/openstack/common/context.py @@ -0,0 +1,111 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. 
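The generator now takes modules and libraries via argparse (-m/-l plus positional source files) instead of the MISTRAL_CONFIG_GENERATOR_EXTRA_MODULES environment variable, and -l names are resolved through the oslo.config.opts stevedore namespace. A minimal entry-point function of the shape the loader above expects (option names illustrative):

```python
from oslo.config import cfg


def list_opts():
    # Each pair is (group name, [Opt, ...]); None means the [DEFAULT] group.
    return [
        ('database', [cfg.StrOpt('connection', secret=True)]),
        (None, [cfg.BoolOpt('verbose', default=False)]),
    ]
```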
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Simple class that stores security context information in the web request. + +Projects should subclass this class if they wish to enhance the request +context or provide additional information in their specific WSGI pipeline. +""" + +import itertools +import uuid + + +def generate_request_id(): + return b'req-' + str(uuid.uuid4()).encode('ascii') + + +class RequestContext(object): + + """Helper class to represent useful information about a request context. + + Stores information about the security context under which the user + accesses the system, as well as additional request information. + """ + + user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}' + + def __init__(self, auth_token=None, user=None, tenant=None, domain=None, + user_domain=None, project_domain=None, is_admin=False, + read_only=False, show_deleted=False, request_id=None, + instance_uuid=None): + self.auth_token = auth_token + self.user = user + self.tenant = tenant + self.domain = domain + self.user_domain = user_domain + self.project_domain = project_domain + self.is_admin = is_admin + self.read_only = read_only + self.show_deleted = show_deleted + self.instance_uuid = instance_uuid + if not request_id: + request_id = generate_request_id() + self.request_id = request_id + + def to_dict(self): + user_idt = ( + self.user_idt_format.format(user=self.user or '-', + tenant=self.tenant or '-', + domain=self.domain or '-', + user_domain=self.user_domain or '-', + p_domain=self.project_domain or '-')) + + return {'user': self.user, + 'tenant': self.tenant, + 'domain': self.domain, + 'user_domain': self.user_domain, + 'project_domain': self.project_domain, + 'is_admin': self.is_admin, + 'read_only': self.read_only, + 'show_deleted': self.show_deleted, + 'auth_token': self.auth_token, + 'request_id': self.request_id, + 'instance_uuid': self.instance_uuid, + 'user_identity': user_idt} + + +def get_admin_context(show_deleted=False): + context = RequestContext(None, + tenant=None, + is_admin=True, + show_deleted=show_deleted) + return context + + +def get_context_from_function_and_args(function, args, kwargs): + """Find an arg of type RequestContext and return it. + + This is useful in a couple of decorators where we don't + know much about the function we're wrapping. + """ + + for arg in itertools.chain(kwargs.values(), args): + if isinstance(arg, RequestContext): + return arg + + return None + + +def is_user_context(context): + """Indicates if the request context is a normal user.""" + if not context: + return False + if context.is_admin: + return False + if not context.user_id or not context.project_id: + return False + return True diff --git a/mistral/openstack/common/db/__init__.py b/mistral/openstack/common/db/__init__.py index 5f5273f3e..e69de29bb 100644 --- a/mistral/openstack/common/db/__init__.py +++ b/mistral/openstack/common/db/__init__.py @@ -1,14 +0,0 @@ -# Copyright 2012 Cloudscaling Group, Inc -# All Rights Reserved. 
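A quick sketch of the new context module in use. One caveat worth noting: is_user_context() tests context.user_id and context.project_id, attributes RequestContext itself does not define (it stores user and tenant), so that helper appears to assume a subclass that provides them.

```python
from mistral.openstack.common import context

ctx = context.RequestContext(user='alice', tenant='demo')
print(ctx.to_dict()['user_identity'])        # -> alice demo - - -
print(context.get_admin_context().is_admin)  # -> True
```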
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/mistral/openstack/common/db/api.py b/mistral/openstack/common/db/api.py index c0df82d30..d69db85e2 100644 --- a/mistral/openstack/common/db/api.py +++ b/mistral/openstack/common/db/api.py @@ -15,90 +15,148 @@ """Multiple DB API backend support. -Supported configuration options: - -The following two parameters are in the 'database' group: -`backend`: DB backend name or full module path to DB backend module. -`use_tpool`: Enable thread pooling of DB API calls. - A DB backend module should implement a method named 'get_backend' which takes no arguments. The method can return any object that implements DB API methods. - -*NOTE*: There are bugs in eventlet when using tpool combined with -threading locks. The python logging module happens to use such locks. To -work around this issue, be sure to specify thread=False with -eventlet.monkey_patch(). - -A bug for eventlet has been filed here: - -https://bitbucket.org/eventlet/eventlet/issue/137/ """ + import functools +import logging +import threading +import time -from oslo.config import cfg - +from mistral.openstack.common.db import exception +from mistral.openstack.common.gettextutils import _LE from mistral.openstack.common import importutils -from mistral.openstack.common import lockutils -db_opts = [ - cfg.StrOpt('backend', - default='sqlalchemy', - deprecated_name='db_backend', - deprecated_group='DEFAULT', - help='The backend to use for db'), - cfg.BoolOpt('use_tpool', - default=False, - deprecated_name='dbapi_use_tpool', - deprecated_group='DEFAULT', - help='Enable the experimental use of thread pooling for ' - 'all DB API calls') -] +LOG = logging.getLogger(__name__) -CONF = cfg.CONF -CONF.register_opts(db_opts, 'database') + +def safe_for_db_retry(f): + """Enable db-retry for decorated function, if config option enabled.""" + f.__dict__['enable_retry'] = True + return f + + +class wrap_db_retry(object): + """Retry db.api methods, if DBConnectionError() raised + + Retry decorated db.api methods. If we enabled `use_db_reconnect` + in config, this decorator will be applied to all db.api functions, + marked with @safe_for_db_retry decorator. + Decorator catchs DBConnectionError() and retries function in a + loop until it succeeds, or until maximum retries count will be reached. 
+ """ + + def __init__(self, retry_interval, max_retries, inc_retry_interval, + max_retry_interval): + super(wrap_db_retry, self).__init__() + + self.retry_interval = retry_interval + self.max_retries = max_retries + self.inc_retry_interval = inc_retry_interval + self.max_retry_interval = max_retry_interval + + def __call__(self, f): + @functools.wraps(f) + def wrapper(*args, **kwargs): + next_interval = self.retry_interval + remaining = self.max_retries + + while True: + try: + return f(*args, **kwargs) + except exception.DBConnectionError as e: + if remaining == 0: + LOG.exception(_LE('DB exceeded retry limit.')) + raise exception.DBError(e) + if remaining != -1: + remaining -= 1 + LOG.exception(_LE('DB connection error.')) + # NOTE(vsergeyev): We are using patched time module, so + # this effectively yields the execution + # context to another green thread. + time.sleep(next_interval) + if self.inc_retry_interval: + next_interval = min( + next_interval * 2, + self.max_retry_interval + ) + return wrapper class DBAPI(object): - def __init__(self, backend_mapping=None): - if backend_mapping is None: - backend_mapping = {} - self.__backend = None - self.__backend_mapping = backend_mapping + def __init__(self, backend_name, backend_mapping=None, lazy=False, + **kwargs): + """Initialize the chosen DB API backend. + + :param backend_name: name of the backend to load + :type backend_name: str + + :param backend_mapping: backend name -> module/class to load mapping + :type backend_mapping: dict + + :param lazy: load the DB backend lazily on the first DB API method call + :type lazy: bool + + Keyword arguments: + + :keyword use_db_reconnect: retry DB transactions on disconnect or not + :type use_db_reconnect: bool + + :keyword retry_interval: seconds between transaction retries + :type retry_interval: int + + :keyword inc_retry_interval: increase retry interval or not + :type inc_retry_interval: bool + + :keyword max_retry_interval: max interval value between retries + :type max_retry_interval: int + + :keyword max_retries: max number of retries before an error is raised + :type max_retries: int - @lockutils.synchronized('dbapi_backend', 'mistral-') - def __get_backend(self): - """Get the actual backend. May be a module or an instance of - a class. Doesn't matter to us. We do this synchronized as it's - possible multiple greenthreads started very quickly trying to do - DB calls and eventlet can switch threads before self.__backend gets - assigned. """ - if self.__backend: - # Another thread assigned it - return self.__backend - backend_name = CONF.database.backend - self.__use_tpool = CONF.database.use_tpool - if self.__use_tpool: - from eventlet import tpool - self.__tpool = tpool - # Import the untranslated name if we don't have a - # mapping. 
- backend_path = self.__backend_mapping.get(backend_name, - backend_name) - backend_mod = importutils.import_module(backend_path) - self.__backend = backend_mod.get_backend() - return self.__backend + + self._backend = None + self._backend_name = backend_name + self._backend_mapping = backend_mapping or {} + self._lock = threading.Lock() + + if not lazy: + self._load_backend() + + self.use_db_reconnect = kwargs.get('use_db_reconnect', False) + self.retry_interval = kwargs.get('retry_interval', 1) + self.inc_retry_interval = kwargs.get('inc_retry_interval', True) + self.max_retry_interval = kwargs.get('max_retry_interval', 10) + self.max_retries = kwargs.get('max_retries', 20) + + def _load_backend(self): + with self._lock: + if not self._backend: + # Import the untranslated name if we don't have a mapping + backend_path = self._backend_mapping.get(self._backend_name, + self._backend_name) + backend_mod = importutils.import_module(backend_path) + self._backend = backend_mod.get_backend() def __getattr__(self, key): - backend = self.__backend or self.__get_backend() - attr = getattr(backend, key) - if not self.__use_tpool or not hasattr(attr, '__call__'): + if not self._backend: + self._load_backend() + + attr = getattr(self._backend, key) + if not hasattr(attr, '__call__'): return attr + # NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry + # DB API methods, decorated with @safe_for_db_retry + # on disconnect. + if self.use_db_reconnect and hasattr(attr, 'enable_retry'): + attr = wrap_db_retry( + retry_interval=self.retry_interval, + max_retries=self.max_retries, + inc_retry_interval=self.inc_retry_interval, + max_retry_interval=self.max_retry_interval)(attr) - def tpool_wrapper(*args, **kwargs): - return self.__tpool.execute(attr, *args, **kwargs) - - functools.update_wrapper(tpool_wrapper, attr) - return tpool_wrapper + return attr diff --git a/mistral/openstack/common/db/exception.py b/mistral/openstack/common/db/exception.py index 69eecb7d3..0680977bc 100644 --- a/mistral/openstack/common/db/exception.py +++ b/mistral/openstack/common/db/exception.py @@ -16,14 +16,16 @@ """DB related custom exceptions.""" -from mistral.openstack.common.gettextutils import _ # noqa +import six + +from mistral.openstack.common.gettextutils import _ class DBError(Exception): """Wraps an implementation specific exception.""" def __init__(self, inner_exception=None): self.inner_exception = inner_exception - super(DBError, self).__init__(str(inner_exception)) + super(DBError, self).__init__(six.text_type(inner_exception)) class DBDuplicateEntry(DBError): @@ -46,7 +48,7 @@ class DBInvalidUnicodeParameter(Exception): class DbMigrationError(DBError): """Wraps migration specific exception.""" def __init__(self, message=None): - super(DbMigrationError, self).__init__(str(message)) + super(DbMigrationError, self).__init__(message) class DBConnectionError(DBError): diff --git a/mistral/openstack/common/db/options.py b/mistral/openstack/common/db/options.py new file mode 100644 index 000000000..42b616b67 --- /dev/null +++ b/mistral/openstack/common/db/options.py @@ -0,0 +1,171 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
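Putting the reworked DBAPI together: construction mirrors the new mistral/db/api.py call, the retry keywords are optional, and only backend functions explicitly marked with @safe_for_db_retry get wrapped when use_db_reconnect is on. A sketch:

```python
from mistral.openstack.common.db import api as db_api

IMPL = db_api.DBAPI('sqlalchemy',
                    backend_mapping={
                        'sqlalchemy': 'mistral.db.sqlalchemy.api'},
                    lazy=True,              # defer backend import to first use
                    use_db_reconnect=True,  # retry marked methods on disconnect
                    retry_interval=1,
                    inc_retry_interval=True,
                    max_retry_interval=10,
                    max_retries=20)

# In the backend module, individual functions opt into retries:
#
#     @db_api.safe_for_db_retry
#     def trigger_get(trigger_id):
#         ...
```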
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from oslo.config import cfg + + +database_opts = [ + cfg.StrOpt('sqlite_db', + deprecated_group='DEFAULT', + default='mistral.sqlite', + help='The file name to use with SQLite'), + cfg.BoolOpt('sqlite_synchronous', + deprecated_group='DEFAULT', + default=True, + help='If True, SQLite uses synchronous mode'), + cfg.StrOpt('backend', + default='sqlalchemy', + deprecated_name='db_backend', + deprecated_group='DEFAULT', + help='The backend to use for db'), + cfg.StrOpt('connection', + help='The SQLAlchemy connection string used to connect to the ' + 'database', + secret=True, + deprecated_opts=[cfg.DeprecatedOpt('sql_connection', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_connection', + group='DATABASE'), + cfg.DeprecatedOpt('connection', + group='sql'), ]), + cfg.StrOpt('mysql_sql_mode', + default='TRADITIONAL', + help='The SQL mode to be used for MySQL sessions. ' + 'This option, including the default, overrides any ' + 'server-set SQL mode. To use whatever SQL mode ' + 'is set by the server configuration, ' + 'set this to no value. Example: mysql_sql_mode='), + cfg.IntOpt('idle_timeout', + default=3600, + deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_idle_timeout', + group='DATABASE'), + cfg.DeprecatedOpt('idle_timeout', + group='sql')], + help='Timeout before idle sql connections are reaped'), + cfg.IntOpt('min_pool_size', + default=1, + deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_min_pool_size', + group='DATABASE')], + help='Minimum number of SQL connections to keep open in a ' + 'pool'), + cfg.IntOpt('max_pool_size', + default=None, + deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_max_pool_size', + group='DATABASE')], + help='Maximum number of SQL connections to keep open in a ' + 'pool'), + cfg.IntOpt('max_retries', + default=10, + deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_max_retries', + group='DATABASE')], + help='Maximum db connection retries during startup. ' + '(setting -1 implies an infinite retry count)'), + cfg.IntOpt('retry_interval', + default=10, + deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval', + group='DEFAULT'), + cfg.DeprecatedOpt('reconnect_interval', + group='DATABASE')], + help='Interval between retries of opening a sql connection'), + cfg.IntOpt('max_overflow', + default=None, + deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow', + group='DEFAULT'), + cfg.DeprecatedOpt('sqlalchemy_max_overflow', + group='DATABASE')], + help='If set, use this value for max_overflow with sqlalchemy'), + cfg.IntOpt('connection_debug', + default=0, + deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug', + group='DEFAULT')], + help='Verbosity of SQL debugging information. 
0=None, ' + '100=Everything'), + cfg.BoolOpt('connection_trace', + default=False, + deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace', + group='DEFAULT')], + help='Add python stack traces to SQL as comment strings'), + cfg.IntOpt('pool_timeout', + default=None, + deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout', + group='DATABASE')], + help='If set, use this value for pool_timeout with sqlalchemy'), + cfg.BoolOpt('use_db_reconnect', + default=False, + help='Enable the experimental use of database reconnect ' + 'on connection lost'), + cfg.IntOpt('db_retry_interval', + default=1, + help='seconds between db connection retries'), + cfg.BoolOpt('db_inc_retry_interval', + default=True, + help='Whether to increase interval between db connection ' + 'retries, up to db_max_retry_interval'), + cfg.IntOpt('db_max_retry_interval', + default=10, + help='max seconds between db connection retries, if ' + 'db_inc_retry_interval is enabled'), + cfg.IntOpt('db_max_retries', + default=20, + help='maximum db connection retries before error is raised. ' + '(setting -1 implies an infinite retry count)'), +] + +CONF = cfg.CONF +CONF.register_opts(database_opts, 'database') + + +def set_defaults(sql_connection, sqlite_db, max_pool_size=None, + max_overflow=None, pool_timeout=None): + """Set defaults for configuration variables.""" + cfg.set_defaults(database_opts, + connection=sql_connection, + sqlite_db=sqlite_db) + # Update the QueuePool defaults + if max_pool_size is not None: + cfg.set_defaults(database_opts, + max_pool_size=max_pool_size) + if max_overflow is not None: + cfg.set_defaults(database_opts, + max_overflow=max_overflow) + if pool_timeout is not None: + cfg.set_defaults(database_opts, + pool_timeout=pool_timeout) + + +def list_opts(): + """Returns a list of oslo.config options available in the library. + + The returned list includes all oslo.config options which may be registered + at runtime by the library. + + Each element of the list is a tuple. The first element is the name of the + group under which the list of elements in the second element will be + registered. A group name of None corresponds to the [DEFAULT] group in + config files. + + The purpose of this is to allow tools like the Oslo sample config file + generator to discover the options exposed to users by this library. + + :returns: a list of (group_name, opts) tuples + """ + return [('database', copy.deepcopy(database_opts))] diff --git a/mistral/openstack/common/db/sqlalchemy/__init__.py b/mistral/openstack/common/db/sqlalchemy/__init__.py index 5f5273f3e..e69de29bb 100644 --- a/mistral/openstack/common/db/sqlalchemy/__init__.py +++ b/mistral/openstack/common/db/sqlalchemy/__init__.py @@ -1,14 +0,0 @@ -# Copyright 2012 Cloudscaling Group, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
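The options module above is what the earlier import_opt() call in mistral/db/sqlalchemy/api.py pulls in; set_defaults() lets a service override the packaged defaults before config files are parsed, and list_opts() feeds the sample-config generator. A usage sketch (connection URL illustrative):

```python
from oslo.config import cfg

from mistral.openstack.common.db import options

cfg.CONF.import_opt('connection',
                    'mistral.openstack.common.db.options',
                    group='database')

options.set_defaults(sql_connection='sqlite:///mistral.sqlite',
                     sqlite_db='mistral.sqlite')

for group, opts in options.list_opts():
    print(group, sorted(opt.name for opt in opts))
```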
diff --git a/mistral/openstack/common/db/sqlalchemy/migration.py b/mistral/openstack/common/db/sqlalchemy/migration.py index b76eb450e..a223dbd4e 100644 --- a/mistral/openstack/common/db/sqlalchemy/migration.py +++ b/mistral/openstack/common/db/sqlalchemy/migration.py @@ -36,54 +36,22 @@ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. -import distutils.version as dist_version import os import re -import migrate from migrate.changeset import ansisql from migrate.changeset.databases import sqlite -from migrate.versioning import util as migrate_util +from migrate import exceptions as versioning_exceptions +from migrate.versioning import api as versioning_api +from migrate.versioning.repository import Repository import sqlalchemy from sqlalchemy.schema import UniqueConstraint from mistral.openstack.common.db import exception -from mistral.openstack.common.db.sqlalchemy import session as db_session -from mistral.openstack.common.gettextutils import _ # noqa - - -@migrate_util.decorator -def patched_with_engine(f, *a, **kw): - url = a[0] - engine = migrate_util.construct_engine(url, **kw) - - try: - kw['engine'] = engine - return f(*a, **kw) - finally: - if isinstance(engine, migrate_util.Engine) and engine is not url: - migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine) - engine.dispose() - - -# TODO(jkoelker) When migrate 0.7.3 is released and nova depends -# on that version or higher, this can be removed -MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3') -if (not hasattr(migrate, '__version__') or - dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION): - migrate_util.with_engine = patched_with_engine - - -# NOTE(jkoelker) Delay importing migrate until we are patched -from migrate import exceptions as versioning_exceptions -from migrate.versioning import api as versioning_api -from migrate.versioning.repository import Repository - -_REPOSITORY = None - -get_engine = db_session.get_engine +from mistral.openstack.common.gettextutils import _ def _get_unique_constraints(self, table): @@ -200,17 +168,20 @@ def patch_migrate(): sqlite.SQLiteConstraintGenerator) -def db_sync(abs_path, version=None, init_version=0): +def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True): """Upgrade or downgrade a database. Function runs the upgrade() or downgrade() functions in change scripts. + :param engine: SQLAlchemy engine instance for a given database :param abs_path: Absolute path to migrate repository. :param version: Database will upgrade/downgrade until this version. If None - database will update to the latest available version. 
:param init_version: Initial database version + :param sanity_check: Require schema sanity checking for all tables """ + if version is not None: try: version = int(version) @@ -218,50 +189,82 @@ def db_sync(abs_path, version=None, init_version=0): raise exception.DbMigrationError( message=_("version should be an integer")) - current_version = db_version(abs_path, init_version) + current_version = db_version(engine, abs_path, init_version) repository = _find_migrate_repo(abs_path) + if sanity_check: + _db_schema_sanity_check(engine) if version is None or version > current_version: - return versioning_api.upgrade(get_engine(), repository, version) + return versioning_api.upgrade(engine, repository, version) else: - return versioning_api.downgrade(get_engine(), repository, + return versioning_api.downgrade(engine, repository, version) -def db_version(abs_path, init_version): +def _db_schema_sanity_check(engine): + """Ensure all database tables were created with required parameters. + + :param engine: SQLAlchemy engine instance for a given database + + """ + + if engine.name == 'mysql': + onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION ' + 'from information_schema.TABLES ' + 'where TABLE_SCHEMA=%s and ' + 'TABLE_COLLATION NOT LIKE "%%utf8%%"') + + # NOTE(morganfainberg): exclude the sqlalchemy-migrate and alembic + # versioning tables from the tables we need to verify utf8 status on. + # Non-standard table names are not supported. + EXCLUDED_TABLES = ['migrate_version', 'alembic_version'] + + table_names = [res[0] for res in + engine.execute(onlyutf8_sql, engine.url.database) if + res[0].lower() not in EXCLUDED_TABLES] + + if len(table_names) > 0: + raise ValueError(_('Tables "%s" have non utf8 collation, ' + 'please make sure all tables are CHARSET=utf8' + ) % ','.join(table_names)) + + +def db_version(engine, abs_path, init_version): """Show the current version of the repository. + :param engine: SQLAlchemy engine instance for a given database :param abs_path: Absolute path to migrate repository :param init_version: Initial database version """ repository = _find_migrate_repo(abs_path) try: - return versioning_api.db_version(get_engine(), repository) + return versioning_api.db_version(engine, repository) except versioning_exceptions.DatabaseNotControlledError: meta = sqlalchemy.MetaData() - engine = get_engine() meta.reflect(bind=engine) tables = meta.tables - if len(tables) == 0: - db_version_control(abs_path, init_version) - return versioning_api.db_version(get_engine(), repository) + if len(tables) == 0 or 'alembic_version' in tables: + db_version_control(engine, abs_path, version=init_version) + return versioning_api.db_version(engine, repository) else: - # Some pre-Essex DB's may not be version controlled. - # Require them to upgrade using Essex first. raise exception.DbMigrationError( - message=_("Upgrade DB using Essex release first.")) + message=_( + "The database is not under version control, but has " + "tables. Please stamp the current version of the schema " + "manually.")) -def db_version_control(abs_path, version=None): +def db_version_control(engine, abs_path, version=None): """Mark a database as under this repository's version control. Once a database is under version control, schema changes should only be done via change scripts in this repository. 
+ :param engine: SQLAlchemy engine instance for a given database :param abs_path: Absolute path to migrate repository :param version: Initial database version """ repository = _find_migrate_repo(abs_path) - versioning_api.version_control(get_engine(), repository, version) + versioning_api.version_control(engine, repository, version) return version @@ -270,9 +273,6 @@ def _find_migrate_repo(abs_path): :param abs_path: Absolute path to migrate repository """ - global _REPOSITORY if not os.path.exists(abs_path): raise exception.DbMigrationError("Path %s not found" % abs_path) - if _REPOSITORY is None: - _REPOSITORY = Repository(abs_path) - return _REPOSITORY + return Repository(abs_path) diff --git a/mistral/openstack/common/db/sqlalchemy/models.py b/mistral/openstack/common/db/sqlalchemy/models.py index 84dbda0f7..aacdeb4c8 100644 --- a/mistral/openstack/common/db/sqlalchemy/models.py +++ b/mistral/openstack/common/db/sqlalchemy/models.py @@ -26,26 +26,24 @@ from sqlalchemy import Column, Integer from sqlalchemy import DateTime from sqlalchemy.orm import object_mapper -from mistral.openstack.common.db.sqlalchemy import session as sa from mistral.openstack.common import timeutils -class ModelBase(object): +class ModelBase(six.Iterator): """Base class for models.""" __table_initialized__ = False - def save(self, session=None): + def save(self, session): """Save this object.""" - if not session: - session = sa.get_session() + # NOTE(boris-42): This part of the code should look like: - # sesssion.add(self) + # session.add(self) # session.flush() # But there is a bug in sqlalchemy and eventlet that # raises NoneType exception if there is no running # transaction and rollback is called. As long as # sqlalchemy has this bug we have to create transaction - # explicity. + # explicitly. with session.begin(subtransactions=True): session.add(self) session.flush() @@ -59,22 +57,35 @@ class ModelBase(object): def get(self, key, default=None): return getattr(self, key, default) - def _get_extra_keys(self): + @property + def _extra_keys(self): + """Specifies custom fields + + Subclasses can override this property to return a list + of custom fields that should be included in their dict + representation. + + For reference check tests/db/sqlalchemy/test_models.py + """ return [] def __iter__(self): - columns = dict(object_mapper(self).columns).keys() + columns = list(dict(object_mapper(self).columns).keys()) # NOTE(russellb): Allow models to specify other keys that can be looked # up, beyond the actual db columns. An example would be the 'name' # property for an Instance. - columns.extend(self._get_extra_keys()) + columns.extend(self._extra_keys) self._i = iter(columns) return self - def next(self): + # In Python 3, __next__() has replaced next(). 
+ def __next__(self): n = six.advance_iterator(self._i) return n, getattr(self, n) + def next(self): + return self.__next__() + def update(self, values): """Make the model object behave like a dict.""" for k, v in six.iteritems(values): @@ -89,19 +100,19 @@ class ModelBase(object): joined = dict([(k, v) for k, v in six.iteritems(self.__dict__) if not k[0] == '_']) local.update(joined) - return local.iteritems() + return six.iteritems(local) class TimestampMixin(object): - created_at = Column(DateTime, default=timeutils.utcnow) - updated_at = Column(DateTime, onupdate=timeutils.utcnow) + created_at = Column(DateTime, default=lambda: timeutils.utcnow()) + updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow()) class SoftDeleteMixin(object): deleted_at = Column(DateTime) deleted = Column(Integer, default=0) - def soft_delete(self, session=None): + def soft_delete(self, session): """Mark this object as deleted.""" self.deleted = self.id self.deleted_at = timeutils.utcnow() diff --git a/mistral/openstack/common/db/sqlalchemy/provision.py b/mistral/openstack/common/db/sqlalchemy/provision.py index 33b466fc3..0b6c5db90 100644 --- a/mistral/openstack/common/db/sqlalchemy/provision.py +++ b/mistral/openstack/common/db/sqlalchemy/provision.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2013 Mirantis.inc # All Rights Reserved. # @@ -18,31 +16,23 @@ """Provision test environment for specific DB backends""" import argparse +import logging import os import random import string +from six import moves import sqlalchemy from mistral.openstack.common.db import exception as exc -SQL_CONNECTION = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://') +LOG = logging.getLogger(__name__) -def _gen_credentials(*names): - """Generate credentials.""" - auth_dict = {} - for name in names: - val = ''.join(random.choice(string.lowercase) for i in xrange(10)) - auth_dict[name] = val - return auth_dict - - -def _get_engine(uri=SQL_CONNECTION): +def get_engine(uri): """Engine creation - By default the uri is SQL_CONNECTION which is admin credentials. Pass the admin credentials uri to get an admin connection. An admin connection is required to create a temporary user and database for each particular test. 
Otherwise use existing connection to recreate connection @@ -62,50 +52,43 @@ def _execute_sql(engine, sql, driver): except sqlalchemy.exc.OperationalError: msg = ('%s does not match database admin ' 'credentials or database does not exist.') - raise exc.DBConnectionError(msg % SQL_CONNECTION) + LOG.exception(msg % engine.url) + raise exc.DBConnectionError(msg % engine.url) def create_database(engine): """Provide temporary user and database for each particular test.""" driver = engine.name - auth = _gen_credentials('database', 'user', 'passwd') - - sqls = { - 'mysql': [ - "drop database if exists %(database)s;", - "grant all on %(database)s.* to '%(user)s'@'localhost'" - " identified by '%(passwd)s';", - "create database %(database)s;", - ], - 'postgresql': [ - "drop database if exists %(database)s;", - "drop user if exists %(user)s;", - "create user %(user)s with password '%(passwd)s';", - "create database %(database)s owner %(user)s;", - ] + auth = { + 'database': ''.join(random.choice(string.ascii_lowercase) + for i in moves.range(10)), + 'user': engine.url.username, + 'passwd': engine.url.password, } + sqls = [ + "drop database if exists %(database)s;", + "create database %(database)s;" + ] + if driver == 'sqlite': return 'sqlite:////tmp/%s' % auth['database'] - - try: - sql_rows = sqls[driver] - except KeyError: + elif driver in ['mysql', 'postgresql']: + sql_query = map(lambda x: x % auth, sqls) + _execute_sql(engine, sql_query, driver) + else: raise ValueError('Unsupported RDBMS %s' % driver) - sql_query = map(lambda x: x % auth, sql_rows) - - _execute_sql(engine, sql_query, driver) params = auth.copy() params['backend'] = driver return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params -def drop_database(engine, current_uri): +def drop_database(admin_engine, current_uri): """Drop temporary database and user after each particular test.""" - engine = _get_engine(current_uri) - admin_engine = _get_engine() + + engine = get_engine(current_uri) driver = engine.name auth = {'database': engine.url.database, 'user': engine.url.username} @@ -114,26 +97,11 @@ def drop_database(engine, current_uri): os.remove(auth['database']) except OSError: pass - return - - sqls = { - 'mysql': [ - "drop database if exists %(database)s;", - "drop user '%(user)s'@'localhost';", - ], - 'postgresql': [ - "drop database if exists %(database)s;", - "drop user if exists %(user)s;", - ] - } - - try: - sql_rows = sqls[driver] - except KeyError: + elif driver in ['mysql', 'postgresql']: + sql = "drop database if exists %(database)s;" + _execute_sql(admin_engine, [sql % auth], driver) + else: raise ValueError('Unsupported RDBMS %s' % driver) - sql_query = map(lambda x: x % auth, sql_rows) - - _execute_sql(admin_engine, sql_query, driver) def main(): @@ -172,7 +140,9 @@ def main(): args = parser.parse_args() - engine = _get_engine() + connection_string = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', + 'sqlite://') + engine = get_engine(connection_string) which = args.which if which == "create": diff --git a/mistral/openstack/common/db/sqlalchemy/session.py b/mistral/openstack/common/db/sqlalchemy/session.py index ae2c0bc5c..5a6508033 100644 --- a/mistral/openstack/common/db/sqlalchemy/session.py +++ b/mistral/openstack/common/db/sqlalchemy/session.py @@ -16,43 +16,34 @@ """Session Handling for SQLAlchemy backend. 
-Initializing: - -* Call set_defaults with the minimal of the following kwargs: - sql_connection, sqlite_db - - Example: - - session.set_defaults( - sql_connection="sqlite:///var/lib/mistral/sqlite.db", - sqlite_db="/var/lib/mistral/sqlite.db") - Recommended ways to use sessions within this framework: -* Don't use them explicitly; this is like running with AUTOCOMMIT=1. - model_query() will implicitly use a session when called without one +* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``. + `model_query()` will implicitly use a session when called without one supplied. This is the ideal situation because it will allow queries to be automatically retried if the database connection is interrupted. - Note: Automatic retry will be enabled in a future patch. + .. note:: Automatic retry will be enabled in a future patch. It is generally fine to issue several queries in a row like this. Even though they may be run in separate transactions and/or separate sessions, each one will see the data from the prior calls. If needed, undo- or rollback-like functionality should be handled at a logical level. For an example, look at - the code around quotas and reservation_rollback(). + the code around quotas and `reservation_rollback()`. Examples: + .. code:: python + def get_foo(context, foo): - return model_query(context, models.Foo).\ - filter_by(foo=foo).\ - first() + return (model_query(context, models.Foo). + filter_by(foo=foo). + first()) def update_foo(context, id, newfoo): - model_query(context, models.Foo).\ - filter_by(id=id).\ - update({'foo': newfoo}) + (model_query(context, models.Foo). + filter_by(id=id). + update({'foo': newfoo})) def create_foo(context, values): foo_ref = models.Foo() @@ -61,21 +52,29 @@ Recommended ways to use sessions within this framework: return foo_ref -* Within the scope of a single method, keeping all the reads and writes within - the context managed by a single session. In this way, the session's __exit__ - handler will take care of calling flush() and commit() for you. - If using this approach, you should not explicitly call flush() or commit(). - Any error within the context of the session will cause the session to emit - a ROLLBACK. If the connection is dropped before this is possible, the - database will implicitly rollback the transaction. +* Within the scope of a single method, keep all the reads and writes within + the context managed by a single session. In this way, the session's + `__exit__` handler will take care of calling `flush()` and `commit()` for + you. If using this approach, you should not explicitly call `flush()` or + `commit()`. Any error within the context of the session will cause the + session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be + raised in `session`'s `__exit__` handler, and any try/except within the + context managed by `session` will not be triggered. And catching other + non-database errors in the session will not trigger the ROLLBACK, so + exception handlers should always be outside the session, unless the + developer wants to do a partial commit on purpose. If the connection is + dropped before this is possible, the database will implicitly roll back the + transaction. - Note: statements in the session scope will not be automatically retried. + .. note:: Statements in the session scope will not be automatically retried. If you create models within the session, they need to be added, but you - do not need to call model.save() + do not need to call `model.save()`: + + .. 
code:: python def create_many_foo(context, foos): - session = get_session() + session = sessionmaker() with session.begin(): for foo in foos: foo_ref = models.Foo() @@ -83,38 +82,64 @@ Recommended ways to use sessions within this framework: session.add(foo_ref) def update_bar(context, foo_id, newbar): - session = get_session() + session = sessionmaker() with session.begin(): - foo_ref = model_query(context, models.Foo, session).\ - filter_by(id=foo_id).\ - first() - model_query(context, models.Bar, session).\ - filter_by(id=foo_ref['bar_id']).\ - update({'bar': newbar}) + foo_ref = (model_query(context, models.Foo, session). + filter_by(id=foo_id). + first()) + (model_query(context, models.Bar, session). + filter_by(id=foo_ref['bar_id']). + update({'bar': newbar})) - Note: update_bar is a trivially simple example of using "with session.begin". - Whereas create_many_foo is a good example of when a transaction is needed, - it is always best to use as few queries as possible. The two queries in - update_bar can be better expressed using a single query which avoids - the need for an explicit transaction. It can be expressed like so: + .. note:: `update_bar` is a trivially simple example of using + ``with session.begin``. Whereas `create_many_foo` is a good example of + when a transaction is needed, it is always best to use as few queries as + possible. + + The two queries in `update_bar` can be better expressed using a single query + which avoids the need for an explicit transaction. It can be expressed like + so: + + .. code:: python + + def update_bar(context, foo_id, newbar): - subq = model_query(context, models.Foo.id).\ - filter_by(id=foo_id).\ - limit(1).\ - subquery() - model_query(context, models.Bar).\ - filter_by(id=subq.as_scalar()).\ - update({'bar': newbar}) + subq = (model_query(context, models.Foo.id). + filter_by(id=foo_id). + limit(1). + subquery()) + (model_query(context, models.Bar). + filter_by(id=subq.as_scalar()). + update({'bar': newbar})) - For reference, this emits approximagely the following SQL statement: + For reference, this emits approximately the following SQL statement: + + .. code:: sql UPDATE bar SET bar = ${newbar} WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1); + .. note:: `create_duplicate_foo` is a trivially simple example of catching an + exception while using ``with session.begin``. Here we create two duplicate + instances with the same primary key; the exception must be caught outside + the context managed by a single session: + + .. code:: python + + def create_duplicate_foo(context): + foo1 = models.Foo() + foo2 = models.Foo() + foo1.id = foo2.id = 1 + session = sessionmaker() + try: + with session.begin(): + session.add(foo1) + session.add(foo2) + except exception.DBDuplicateEntry as e: + handle_error(e) + * Passing an active session between methods. Sessions should only be passed to private methods. The private method must use a subtransaction; otherwise - SQLAlchemy will throw an error when you call session.begin() on an existing + SQLAlchemy will throw an error when you call `session.begin()` on an existing transaction. Public methods should not accept a session parameter and should not be involved in sessions within the caller's scope. @@ -127,8 +152,10 @@ Recommended ways to use sessions within this framework: becomes less clear in this situation. When this is needed for code clarity, it should be clearly documented. + .. 
code:: python + + def myfunc(foo): - session = get_session() + session = sessionmaker() with session.begin(): # do some database things bar = _private_func(foo, session) def _private_func(foo, session=None): if not session: - session = get_session() + session = sessionmaker() with session.begin(subtransactions=True): # do some other database things return bar @@ -146,13 +173,13 @@ There are some things which it is best to avoid: * Don't keep a transaction open any longer than necessary. - This means that your "with session.begin()" block should be as short + This means that your ``with session.begin()`` block should be as short as possible, while still containing all the related calls for that transaction. -* Avoid "with_lockmode('UPDATE')" when possible. +* Avoid ``with_lockmode('UPDATE')`` when possible. - In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match + In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match any rows, it will take a gap-lock. This is a form of write-lock on the "gap" where no rows exist, and prevents any other writes to that space. This can effectively prevent any INSERT into a table by locking the gap @@ -163,16 +190,19 @@ There are some things which it is best to avoid: number of rows matching a query, and if only one row is returned, then issue the SELECT FOR UPDATE. - The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE. + The better long-term solution is to use + ``INSERT .. ON DUPLICATE KEY UPDATE``. However, this cannot be done until the "deleted" columns are removed and proper UNIQUE constraints are added to the tables. Enabling soft deletes: -* To use/enable soft-deletes, the SoftDeleteMixin must be added +* To use/enable soft-deletes, the `SoftDeleteMixin` must be added to your model class. For example: + .. code:: python + class NovaBase(models.SoftDeleteMixin, models.ModelBase): pass @@ -180,13 +210,15 @@ Enabling soft deletes: Efficient use of soft deletes: * There are two possible ways to mark a record as deleted: - model.soft_delete() and query.soft_delete(). + `model.soft_delete()` and `query.soft_delete()`. - model.soft_delete() method works with single already fetched entry. - query.soft_delete() makes only one db request for all entries that correspond - to query. + The `model.soft_delete()` method works with a single already-fetched entry. + `query.soft_delete()` makes only one db request for all entries that + correspond to the query. -* In almost all cases you should use query.soft_delete(). Some examples: +* In almost all cases you should use `query.soft_delete()`. Some examples: + + .. code:: python + + def soft_delete_bar(): count = model_query(BarModel).find(some_condition).soft_delete() @@ -195,35 +227,39 @@ Efficient use of soft deletes: def complex_soft_delete_with_synchronization_bar(session=None): if session is None: - session = get_session() + session = sessionmaker() with session.begin(subtransactions=True): - count = model_query(BarModel).\ - find(some_condition).\ - soft_delete(synchronize_session=True) + count = (model_query(BarModel). + find(some_condition). + soft_delete(synchronize_session=True)) # Here synchronize_session is required, because we # don't know what is going on in outer session. 
if count == 0: raise Exception("0 entries were soft deleted") -* There is only one situation where model.soft_delete() is appropriate: when +* There is only one situation where `model.soft_delete()` is appropriate: when you fetch a single record, work with it, and mark it as deleted in the same transaction. + .. code:: python + def soft_delete_bar_model(): - session = get_session() + session = sessionmaker() with session.begin(): bar_ref = model_query(BarModel).find(some_condition).first() # Work with bar_ref bar_ref.soft_delete(session=session) However, if you need to work with all entries that correspond to query and - then soft delete them you should use query.soft_delete() method: + then soft delete them you should use the `query.soft_delete()` method: + + .. code:: python def soft_delete_multi_models(): - session = get_session() + session = sessionmaker() with session.begin(): - query = model_query(BarModel, session=session).\ - find(some_condition) + query = (model_query(BarModel, session=session). + find(some_condition)) model_refs = query.all() # Work with model_refs query.soft_delete(synchronize_session=False) @@ -231,170 +267,36 @@ Efficient use of soft deletes: # session and these entries are not used after this. When working with many rows, it is very important to use query.soft_delete, - which issues a single query. Using model.soft_delete(), as in the following + which issues a single query. Using `model.soft_delete()`, as in the following example, is very inefficient. + .. code:: python + for bar_ref in bar_refs: bar_ref.soft_delete(session=session) # This will produce count(bar_refs) db requests. + """ import functools -import os.path +import logging import re import time -from oslo.config import cfg import six from sqlalchemy import exc as sqla_exc -import sqlalchemy.interfaces from sqlalchemy.interfaces import PoolListener import sqlalchemy.orm from sqlalchemy.pool import NullPool, StaticPool from sqlalchemy.sql.expression import literal_column from mistral.openstack.common.db import exception -from mistral.openstack.common.gettextutils import _ # noqa -from mistral.openstack.common import log as logging +from mistral.openstack.common.gettextutils import _LE, _LW from mistral.openstack.common import timeutils -sqlite_db_opts = [ - cfg.StrOpt('sqlite_db', - default='mistral.sqlite', - help='the filename to use with sqlite'), - cfg.BoolOpt('sqlite_synchronous', - default=True, - help='If true, use synchronous mode for sqlite'), -] - -database_opts = [ - cfg.StrOpt('connection', - default='sqlite:///' + - os.path.abspath(os.path.join(os.path.dirname(__file__), - '../', '$sqlite_db')), - help='The SQLAlchemy connection string used to connect to the ' - 'database', - deprecated_opts=[cfg.DeprecatedOpt('sql_connection', - group='DEFAULT'), - cfg.DeprecatedOpt('sql_connection', - group='DATABASE'), - cfg.DeprecatedOpt('connection', - group='sql'), ]), - cfg.StrOpt('slave_connection', - default='', - help='The SQLAlchemy connection string used to connect to the ' - 'slave database'), - cfg.IntOpt('idle_timeout', - default=3600, - deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout', - group='DEFAULT'), - cfg.DeprecatedOpt('sql_idle_timeout', - group='DATABASE')], - help='timeout before idle sql connections are reaped'), - cfg.IntOpt('min_pool_size', - default=1, - deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size', - group='DEFAULT'), - cfg.DeprecatedOpt('sql_min_pool_size', - group='DATABASE')], - help='Minimum number of SQL connections to keep open in a ' - 'pool'), - 
cfg.IntOpt('max_pool_size', - default=None, - deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size', - group='DEFAULT'), - cfg.DeprecatedOpt('sql_max_pool_size', - group='DATABASE')], - help='Maximum number of SQL connections to keep open in a ' - 'pool'), - cfg.IntOpt('max_retries', - default=10, - deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries', - group='DEFAULT'), - cfg.DeprecatedOpt('sql_max_retries', - group='DATABASE')], - help='maximum db connection retries during startup. ' - '(setting -1 implies an infinite retry count)'), - cfg.IntOpt('retry_interval', - default=10, - deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval', - group='DEFAULT'), - cfg.DeprecatedOpt('reconnect_interval', - group='DATABASE')], - help='interval between retries of opening a sql connection'), - cfg.IntOpt('max_overflow', - default=None, - deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow', - group='DEFAULT'), - cfg.DeprecatedOpt('sqlalchemy_max_overflow', - group='DATABASE')], - help='If set, use this value for max_overflow with sqlalchemy'), - cfg.IntOpt('connection_debug', - default=0, - deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug', - group='DEFAULT')], - help='Verbosity of SQL debugging information. 0=None, ' - '100=Everything'), - cfg.BoolOpt('connection_trace', - default=False, - deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace', - group='DEFAULT')], - help='Add python stack traces to SQL as comment strings'), - cfg.IntOpt('pool_timeout', - default=None, - deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout', - group='DATABASE')], - help='If set, use this value for pool_timeout with sqlalchemy'), -] - -CONF = cfg.CONF -CONF.register_opts(sqlite_db_opts) -CONF.register_opts(database_opts, 'database') LOG = logging.getLogger(__name__) -_ENGINE = None -_MAKER = None -_SLAVE_ENGINE = None -_SLAVE_MAKER = None - - -def set_defaults(sql_connection, sqlite_db, max_pool_size=None, - max_overflow=None, pool_timeout=None): - """Set defaults for configuration variables.""" - cfg.set_defaults(database_opts, - connection=sql_connection) - cfg.set_defaults(sqlite_db_opts, - sqlite_db=sqlite_db) - # Update the QueuePool defaults - if max_pool_size is not None: - cfg.set_defaults(database_opts, - max_pool_size=max_pool_size) - if max_overflow is not None: - cfg.set_defaults(database_opts, - max_overflow=max_overflow) - if pool_timeout is not None: - cfg.set_defaults(database_opts, - pool_timeout=pool_timeout) - - -def cleanup(): - global _ENGINE, _MAKER - global _SLAVE_ENGINE, _SLAVE_MAKER - - if _MAKER: - _MAKER.close_all() - _MAKER = None - if _ENGINE: - _ENGINE.dispose() - _ENGINE = None - if _SLAVE_MAKER: - _SLAVE_MAKER.close_all() - _SLAVE_MAKER = None - if _SLAVE_ENGINE: - _SLAVE_ENGINE.dispose() - _SLAVE_ENGINE = None - class SqliteForeignKeysListener(PoolListener): """Ensures that the foreign key constraints are enforced in SQLite. 
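The deletions above strip the last of the module-level engine state out of session.py; from here on an application owns its engine and sessionmaker. A minimal sketch of the replacement pattern (hypothetical caller code, using the EngineFacade class added near the end of this file):

# Hypothetical application-side code, not part of the patch.
from mistral.openstack.common.db.sqlalchemy import session as db_session

# One facade per application; it owns the engine and the sessionmaker.
facade = db_session.EngineFacade('sqlite://', autocommit=False)

engine = facade.get_engine()    # shared, thread-safe connection pool
session = facade.get_session()  # per-caller transactional context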
@@ -407,29 +309,6 @@ class SqliteForeignKeysListener(PoolListener): dbapi_con.execute('pragma foreign_keys=ON') -def get_session(autocommit=True, expire_on_commit=False, - sqlite_fk=False, slave_session=False): - """Return a SQLAlchemy session.""" - global _MAKER - global _SLAVE_MAKER - maker = _MAKER - - if slave_session: - maker = _SLAVE_MAKER - - if maker is None: - engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session) - maker = get_maker(engine, autocommit, expire_on_commit) - - if slave_session: - _SLAVE_MAKER = maker - else: - _MAKER = maker - - session = maker() - return session - - # note(boris-42): In current versions of DB backends unique constraint # violation messages follow the structure: # @@ -437,6 +316,11 @@ def get_session(autocommit=True, expire_on_commit=False, # 1 column - (IntegrityError) column c1 is not unique # N columns - (IntegrityError) column c1, c2, ..., N are not unique # +# sqlite since 3.7.16: +# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1 +# +# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2 +# # postgres: # 1 column - (IntegrityError) duplicate key value violates unique # constraint "users_c1_key" @@ -448,10 +332,20 @@ def get_session(autocommit=True, expire_on_commit=False, # 'c1'") # N columns - (IntegrityError) (1062, "Duplicate entry 'values joined # with -' for key 'name_of_our_constraint'") +# +# ibm_db_sa: +# N columns - (IntegrityError) SQL0803N One or more values in the INSERT +# statement, UPDATE statement, or foreign key update caused by a +# DELETE statement are not valid because the primary key, unique +# constraint or unique index identified by "2" constrains table +# "NOVA.KEY_PAIRS" from having duplicate values for the index +# key. _DUP_KEY_RE_DB = { - "sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"), - "postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"), - "mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$") + "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"), + re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")), + "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),), + "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),), + "ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),), } @@ -473,7 +367,7 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name): return [columns] return columns[len(uniqbase):].split("0")[1:] - if engine_name not in ["mysql", "sqlite", "postgresql"]: + if engine_name not in ("ibm_db_sa", "mysql", "sqlite", "postgresql"): return # FIXME(johannes): The usage of the .message attribute has been @@ -481,13 +375,22 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name): # SQLAlchemy can differ when using unicode() and accessing .message. # An audit across all three supported engines will be necessary to # ensure there are no regressions. - m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message) - if not m: + for pattern in _DUP_KEY_RE_DB[engine_name]: + match = pattern.match(integrity_error.message) + if match: + break + else: return - columns = m.group(1) + + # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the + # columns so we have to omit that from the DBDuplicateEntry error. 
+ columns = '' + + if engine_name != 'ibm_db_sa': + columns = match.group(1) if engine_name == "sqlite": - columns = columns.strip().split(", ") + columns = [c.split('.')[-1] for c in columns.strip().split(", ")] else: columns = get_columns_from_uniq_cons_or_name(columns) raise exception.DBDuplicateEntry(columns, integrity_error) @@ -526,55 +429,39 @@ def _raise_if_deadlock_error(operational_error, engine_name): def _wrap_db_error(f): @functools.wraps(f) - def _wrap(*args, **kwargs): + def _wrap(self, *args, **kwargs): try: - return f(*args, **kwargs) + assert issubclass( + self.__class__, sqlalchemy.orm.session.Session + ), ('_wrap_db_error() can only be applied to methods of ' + 'subclasses of sqlalchemy.orm.session.Session.') + + return f(self, *args, **kwargs) except UnicodeEncodeError: raise exception.DBInvalidUnicodeParameter() - # note(boris-42): We should catch unique constraint violation and - # wrap it by our own DBDuplicateEntry exception. Unique constraint - # violation is wrapped by IntegrityError. except sqla_exc.OperationalError as e: - _raise_if_deadlock_error(e, get_engine().name) + _raise_if_db_connection_lost(e, self.bind) + _raise_if_deadlock_error(e, self.bind.dialect.name) # NOTE(comstud): A lot of code is checking for OperationalError # so let's not wrap it for now. raise + # note(boris-42): We should catch unique constraint violation and + # wrap it by our own DBDuplicateEntry exception. Unique constraint + # violation is wrapped by IntegrityError. except sqla_exc.IntegrityError as e: # note(boris-42): SqlAlchemy doesn't unify errors from different # DBs so we must do this. Also in some tables (for example # instance_types) there are more than one unique constraint. This # means we should get names of columns, which values violate # unique constraint, from error message. - _raise_if_duplicate_entry_error(e, get_engine().name) + _raise_if_duplicate_entry_error(e, self.bind.dialect.name) raise exception.DBError(e) except Exception as e: - LOG.exception(_('DB exception wrapped.')) + LOG.exception(_LE('DB exception wrapped.')) raise exception.DBError(e) return _wrap -def get_engine(sqlite_fk=False, slave_engine=False): - """Return a SQLAlchemy engine.""" - global _ENGINE - global _SLAVE_ENGINE - engine = _ENGINE - db_uri = CONF.database.connection - - if slave_engine: - engine = _SLAVE_ENGINE - db_uri = CONF.database.slave_connection - - if engine is None: - engine = create_engine(db_uri, - sqlite_fk=sqlite_fk) - if slave_engine: - _SLAVE_ENGINE = engine - else: - _ENGINE = engine - - return engine - - def _synchronous_switch_listener(dbapi_conn, connection_rec): """Switch sqlite connections to non-synchronous mode.""" dbapi_conn.execute("PRAGMA synchronous = OFF") @@ -601,85 +488,176 @@ def _thread_yield(dbapi_con, con_record): time.sleep(0) -def _ping_listener(dbapi_conn, connection_rec, connection_proxy): - """Ensures that MySQL connections checked out of the pool are alive. +def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy): + """Ensures that MySQL, PostgreSQL or DB2 connections are alive. 
Borrowed from: http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f """ + cursor = dbapi_conn.cursor() try: - dbapi_conn.cursor().execute('select 1') - except dbapi_conn.OperationalError as ex: - if ex.args[0] in (2006, 2013, 2014, 2045, 2055): - LOG.warn(_('Got mysql server has gone away: %s'), ex) - raise sqla_exc.DisconnectionError("Database server went away") + ping_sql = 'select 1' + if engine.name == 'ibm_db_sa': + # DB2 requires a table expression + ping_sql = 'select 1 from (values (1)) AS t1' + cursor.execute(ping_sql) + except Exception as ex: + if engine.dialect.is_disconnect(ex, dbapi_conn, cursor): + msg = _LW('Database server has gone away: %s') % ex + LOG.warning(msg) + + # if the database server has gone away, all connections in the pool + # have become invalid and we can safely close all of them here, + # rather than waste time on checking of every single connection + engine.dispose() + + # this will be handled by SQLAlchemy and will force it to create + # a new connection and retry the original action + raise sqla_exc.DisconnectionError(msg) else: raise +def _set_session_sql_mode(dbapi_con, connection_rec, sql_mode=None): + """Set the sql_mode session variable. + + MySQL supports several server modes. The default is None, but sessions + may choose to enable server modes like TRADITIONAL, ANSI, + several STRICT_* modes and others. + + Note: passing in '' (empty string) for sql_mode clears + the SQL mode for the session, overriding a potentially set + server default. + """ + + cursor = dbapi_con.cursor() + cursor.execute("SET SESSION sql_mode = %s", [sql_mode]) + + +def _mysql_get_effective_sql_mode(engine): + """Returns the effective SQL mode for connections from the engine pool. + + Returns ``None`` if the mode isn't available, otherwise returns the mode. + + """ + # Get the real effective SQL mode. Even when unset by + # our own config, the server may still be operating in a specific + # SQL mode as set by the server configuration. + # Also note that the checkout listener will be called on execute to + # set the mode if it's registered. + row = engine.execute("SHOW VARIABLES LIKE 'sql_mode'").fetchone() + if row is None: + return + return row[1] + + +def _mysql_check_effective_sql_mode(engine): + """Logs a message based on the effective SQL mode for MySQL connections.""" + realmode = _mysql_get_effective_sql_mode(engine) + + if realmode is None: + LOG.warning(_LW('Unable to detect effective SQL mode')) + return + + LOG.debug('MySQL server mode set to %s', realmode) + # 'TRADITIONAL' mode enables several other modes, so + # we need a substring match here + if not ('TRADITIONAL' in realmode.upper() or + 'STRICT_ALL_TABLES' in realmode.upper()): + LOG.warning(_LW("MySQL SQL mode is '%s', " + "consider enabling TRADITIONAL or STRICT_ALL_TABLES"), + realmode) + + +def _mysql_set_mode_callback(engine, sql_mode): + if sql_mode is not None: + mode_callback = functools.partial(_set_session_sql_mode, + sql_mode=sql_mode) + sqlalchemy.event.listen(engine, 'connect', mode_callback) + _mysql_check_effective_sql_mode(engine) + + def _is_db_connection_error(args): """Return True if error in connecting to db.""" # NOTE(adam_g): This is currently MySQL specific and needs to be extended # to support Postgres and others. 
# For the db2, the error code is -30081 since the db2 is still not ready - conn_err_codes = ('2002', '2003', '2006', '-30081') + conn_err_codes = ('2002', '2003', '2006', '2013', '-30081') for err_code in conn_err_codes: if args.find(err_code) != -1: return True return False -def create_engine(sql_connection, sqlite_fk=False): +def _raise_if_db_connection_lost(error, engine): + # NOTE(vsergeyev): Function is_disconnect(e, connection, cursor) + # requires connection and cursor in incoming parameters, + # but we have no possibility to create connection if DB + # is not available, so in such case reconnect fails. + # But is_disconnect() ignores these parameters, so it + # makes sense to pass to function None as placeholder + # instead of connection and cursor. + if engine.dialect.is_disconnect(error, None, None): + raise exception.DBConnectionError(error) + + +def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None, + idle_timeout=3600, + connection_debug=0, max_pool_size=None, max_overflow=None, + pool_timeout=None, sqlite_synchronous=True, + connection_trace=False, max_retries=10, retry_interval=10): """Return a new SQLAlchemy engine.""" - # NOTE(geekinutah): At this point we could be connecting to the normal - # db handle or the slave db handle. Things like - # _wrap_db_error aren't going to work well if their - # backends don't match. Let's check. - _assert_matching_drivers() + connection_dict = sqlalchemy.engine.url.make_url(sql_connection) engine_args = { - "pool_recycle": CONF.database.idle_timeout, - "echo": False, + "pool_recycle": idle_timeout, 'convert_unicode': True, } - # Map our SQL debug level to SQLAlchemy's options - if CONF.database.connection_debug >= 100: - engine_args['echo'] = 'debug' - elif CONF.database.connection_debug >= 50: - engine_args['echo'] = True + logger = logging.getLogger('sqlalchemy.engine') + + # Map SQL debug level to Python log level + if connection_debug >= 100: + logger.setLevel(logging.DEBUG) + elif connection_debug >= 50: + logger.setLevel(logging.INFO) + else: + logger.setLevel(logging.WARNING) if "sqlite" in connection_dict.drivername: if sqlite_fk: engine_args["listeners"] = [SqliteForeignKeysListener()] engine_args["poolclass"] = NullPool - if CONF.database.connection == "sqlite://": + if sql_connection == "sqlite://": engine_args["poolclass"] = StaticPool engine_args["connect_args"] = {'check_same_thread': False} else: - if CONF.database.max_pool_size is not None: - engine_args['pool_size'] = CONF.database.max_pool_size - if CONF.database.max_overflow is not None: - engine_args['max_overflow'] = CONF.database.max_overflow - if CONF.database.pool_timeout is not None: - engine_args['pool_timeout'] = CONF.database.pool_timeout + if max_pool_size is not None: + engine_args['pool_size'] = max_pool_size + if max_overflow is not None: + engine_args['max_overflow'] = max_overflow + if pool_timeout is not None: + engine_args['pool_timeout'] = pool_timeout engine = sqlalchemy.create_engine(sql_connection, **engine_args) sqlalchemy.event.listen(engine, 'checkin', _thread_yield) - if 'mysql' in connection_dict.drivername: - sqlalchemy.event.listen(engine, 'checkout', _ping_listener) + if engine.name in ('ibm_db_sa', 'mysql', 'postgresql'): + ping_callback = functools.partial(_ping_listener, engine) + sqlalchemy.event.listen(engine, 'checkout', ping_callback) + if engine.name == 'mysql': + if mysql_sql_mode: + _mysql_set_mode_callback(engine, mysql_sql_mode) elif 'sqlite' in connection_dict.drivername: - if not CONF.sqlite_synchronous: + if 
not sqlite_synchronous: sqlalchemy.event.listen(engine, 'connect', _synchronous_switch_listener) sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener) - if (CONF.database.connection_trace and - engine.dialect.dbapi.__name__ == 'MySQLdb'): + if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb': _patch_mysqldb_with_stacktrace_comments() try: @@ -688,15 +666,15 @@ def create_engine(sql_connection, sqlite_fk=False): if not _is_db_connection_error(e.args[0]): raise - remaining = CONF.database.max_retries + remaining = max_retries if remaining == -1: remaining = 'infinite' while True: - msg = _('SQL connection failed. %s attempts left.') - LOG.warn(msg % remaining) + msg = _LW('SQL connection failed. %s attempts left.') + LOG.warning(msg % remaining) if remaining != 'infinite': remaining -= 1 - time.sleep(CONF.database.retry_interval) + time.sleep(retry_interval) try: engine.connect() break @@ -783,13 +761,144 @@ def _patch_mysqldb_with_stacktrace_comments(): setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query) -def _assert_matching_drivers(): - """Make sure slave handle and normal handle have the same driver.""" - # NOTE(geekinutah): There's no use case for writing to one backend and - # reading from another. Who knows what the future holds? - if CONF.database.slave_connection == '': - return +class EngineFacade(object): + """A helper class for removing global engine instances from mistral.db. - normal = sqlalchemy.engine.url.make_url(CONF.database.connection) - slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection) - assert normal.drivername == slave.drivername + As a library, mistral.db can't decide where to store/when to create engine + and sessionmaker instances, so this must be left for a target application. + + On the other hand, in order to simplify the adoption of mistral.db changes, + we'll provide a helper class, which creates engine and sessionmaker + on its instantiation and provides get_engine()/get_session() methods + that are compatible with corresponding utility functions that currently + exist in target projects, e.g. in Nova. + + engine/sessionmaker instances will still be global (and they are meant to + be global), but they will be stored in the app context, rather than in the + mistral.db context. + + Note: use of this helper is completely optional and you are encouraged to + integrate engine/sessionmaker instances into your apps any way you like + (e.g. one might want to bind a session to a request context). Two important + things to remember: + + 1. An Engine instance is effectively a pool of DB connections, so it's + meant to be shared (and it's thread-safe). + 2. A Session instance is not meant to be shared and represents a DB + transactional context (i.e. it's not thread-safe). sessionmaker is + a factory of sessions. + + """ + + def __init__(self, sql_connection, + sqlite_fk=False, autocommit=True, + expire_on_commit=False, **kwargs): + """Initialize engine and sessionmaker instances. + + :param sqlite_fk: enable foreign keys in SQLite + :type sqlite_fk: bool + + :param autocommit: use autocommit mode for created Session instances + :type autocommit: bool + + :param expire_on_commit: expire session objects on commit + :type expire_on_commit: bool + + Keyword arguments: + + :keyword mysql_sql_mode: the SQL mode to be used for MySQL sessions. 
+ (defaults to TRADITIONAL) + :keyword idle_timeout: timeout before idle sql connections are reaped + (defaults to 3600) + :keyword connection_debug: verbosity of SQL debugging information. + 0=None, 100=Everything (defaults to 0) + :keyword max_pool_size: maximum number of SQL connections to keep open + in a pool (defaults to SQLAlchemy settings) + :keyword max_overflow: if set, use this value for max_overflow with + sqlalchemy (defaults to SQLAlchemy settings) + :keyword pool_timeout: if set, use this value for pool_timeout with + sqlalchemy (defaults to SQLAlchemy settings) + :keyword sqlite_synchronous: if True, SQLite uses synchronous mode + (defaults to True) + :keyword connection_trace: add python stack traces to SQL as comment + strings (defaults to False) + :keyword max_retries: maximum db connection retries during startup. + (setting -1 implies an infinite retry count) + (defaults to 10) + :keyword retry_interval: interval between retries of opening a sql + connection (defaults to 10) + + """ + + super(EngineFacade, self).__init__() + + self._engine = create_engine( + sql_connection=sql_connection, + sqlite_fk=sqlite_fk, + mysql_sql_mode=kwargs.get('mysql_sql_mode', 'TRADITIONAL'), + idle_timeout=kwargs.get('idle_timeout', 3600), + connection_debug=kwargs.get('connection_debug', 0), + max_pool_size=kwargs.get('max_pool_size'), + max_overflow=kwargs.get('max_overflow'), + pool_timeout=kwargs.get('pool_timeout'), + sqlite_synchronous=kwargs.get('sqlite_synchronous', True), + connection_trace=kwargs.get('connection_trace', False), + max_retries=kwargs.get('max_retries', 10), + retry_interval=kwargs.get('retry_interval', 10)) + self._session_maker = get_maker( + engine=self._engine, + autocommit=autocommit, + expire_on_commit=expire_on_commit) + + def get_engine(self): + """Get the engine instance (note, that it's shared).""" + + return self._engine + + def get_session(self, **kwargs): + """Get a Session instance. + + If passed, keyword argument values override the ones used when the + sessionmaker instance was created. + + :keyword autocommit: use autocommit mode for created Session instances + :type autocommit: bool + + :keyword expire_on_commit: expire session objects on commit + :type expire_on_commit: bool + + """ + + # NOTE: iterate over a copy of the keys; deleting from a dict + # while iterating over it raises a RuntimeError. + for arg in list(kwargs): + if arg not in ('autocommit', 'expire_on_commit'): + del kwargs[arg] + + return self._session_maker(**kwargs) + + @classmethod + def from_config(cls, connection_string, conf, + sqlite_fk=False, autocommit=True, expire_on_commit=False): + """Initialize EngineFacade using oslo.config config instance options. + + :param connection_string: SQLAlchemy connection string + :type connection_string: string + + :param conf: oslo.config config instance + :type conf: oslo.config.cfg.ConfigOpts + + :param sqlite_fk: enable foreign keys in SQLite + :type sqlite_fk: bool + + :param autocommit: use autocommit mode for created Session instances + :type autocommit: bool + + :param expire_on_commit: expire session objects on commit + :type expire_on_commit: bool + + """ + + return cls(sql_connection=connection_string, + sqlite_fk=sqlite_fk, + autocommit=autocommit, + expire_on_commit=expire_on_commit, + **dict(conf.database.items())) diff --git a/mistral/openstack/common/db/sqlalchemy/test_base.py b/mistral/openstack/common/db/sqlalchemy/test_base.py new file mode 100644 index 000000000..8ed9a3a5d --- /dev/null +++ b/mistral/openstack/common/db/sqlalchemy/test_base.py @@ -0,0 +1,165 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import functools +import os + +import fixtures +from oslotest import base as test_base +import six + +from mistral.openstack.common.db.sqlalchemy import provision +from mistral.openstack.common.db.sqlalchemy import session +from mistral.openstack.common.db.sqlalchemy import utils + + +class DbFixture(fixtures.Fixture): + """Basic database fixture. + + Allows running tests on various db backends, such as SQLite, MySQL and + PostgreSQL. By default the sqlite backend is used. To override the + default backend uri, set the env variable OS_TEST_DBAPI_CONNECTION with + database admin credentials for the specific backend. + """ + + def _get_uri(self): + return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://') + + def __init__(self, test): + super(DbFixture, self).__init__() + + self.test = test + + def cleanUp(self): + self.test.engine.dispose() + + def setUp(self): + super(DbFixture, self).setUp() + + self.test.engine = session.create_engine(self._get_uri()) + self.test.sessionmaker = session.get_maker(self.test.engine) + + +class DbTestCase(test_base.BaseTestCase): + """Base class for testing of DB code. + + Uses `DbFixture`. Intended to be the main database test case for running + all the tests on a given backend with a user-defined uri. Backend-specific + tests should be decorated with the `backend_specific` decorator. + """ + + FIXTURE = DbFixture + + def setUp(self): + super(DbTestCase, self).setUp() + self.useFixture(self.FIXTURE(self)) + + +ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql'] + + +def backend_specific(*dialects): + """Decorator to skip backend specific tests on inappropriate engines. + + :param dialects: list of dialect names under which the test will be + launched. + """ + def wrap(f): + @functools.wraps(f) + def ins_wrap(self): + if not set(dialects).issubset(ALLOWED_DIALECTS): + raise ValueError( + "Please use allowed dialects: %s" % ALLOWED_DIALECTS) + if self.engine.name not in dialects: + msg = ('The test "%s" can be run ' + 'only on %s. Current engine is %s.') + args = (f.__name__, ' '.join(dialects), self.engine.name) + self.skip(msg % args) + else: + return f(self) + return ins_wrap + return wrap + + +@six.add_metaclass(abc.ABCMeta) +class OpportunisticFixture(DbFixture): + """Base fixture to use default CI databases. + + The databases exist in the OpenStack CI infrastructure. For correct + functioning in a local environment the databases must be + created manually. 
+ """ + + DRIVER = abc.abstractproperty(lambda: None) + DBNAME = PASSWORD = USERNAME = 'openstack_citest' + + def setUp(self): + self._provisioning_engine = provision.get_engine( + utils.get_connect_string(backend=self.DRIVER, + user=self.USERNAME, + passwd=self.PASSWORD, + database=self.DBNAME) + ) + self._uri = provision.create_database(self._provisioning_engine) + + super(OpportunisticFixture, self).setUp() + + def cleanUp(self): + super(OpportunisticFixture, self).cleanUp() + + provision.drop_database(self._provisioning_engine, self._uri) + + def _get_uri(self): + return self._uri + + +@six.add_metaclass(abc.ABCMeta) +class OpportunisticTestCase(DbTestCase): + """Base test case to use default CI databases. + + The subclasses of this test case run only when the openstack_citest + database is available; otherwise the tests will be skipped. + """ + + FIXTURE = abc.abstractproperty(lambda: None) + + def setUp(self): + credentials = { + 'backend': self.FIXTURE.DRIVER, + 'user': self.FIXTURE.USERNAME, + 'passwd': self.FIXTURE.PASSWORD, + 'database': self.FIXTURE.DBNAME} + + if self.FIXTURE.DRIVER and not utils.is_backend_avail(**credentials): + msg = '%s backend is not available.' % self.FIXTURE.DRIVER + return self.skip(msg) + + super(OpportunisticTestCase, self).setUp() + + +class MySQLOpportunisticFixture(OpportunisticFixture): + DRIVER = 'mysql' + + +class PostgreSQLOpportunisticFixture(OpportunisticFixture): + DRIVER = 'postgresql' + + +class MySQLOpportunisticTestCase(OpportunisticTestCase): + FIXTURE = MySQLOpportunisticFixture + + +class PostgreSQLOpportunisticTestCase(OpportunisticTestCase): + FIXTURE = PostgreSQLOpportunisticFixture diff --git a/mistral/openstack/common/db/sqlalchemy/test_migrations.py b/mistral/openstack/common/db/sqlalchemy/test_migrations.py index f0caf1891..9c61c180b 100644 --- a/mistral/openstack/common/db/sqlalchemy/test_migrations.py +++ b/mistral/openstack/common/db/sqlalchemy/test_migrations.py @@ -14,84 +14,44 @@ # License for the specific language governing permissions and limitations # under the License. 
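As a reference point before the test_migrations.py changes: a sketch of how a backend-specific test might build on the test_base helpers added above (hypothetical test code, not part of the patch):

# Hypothetical backend-specific test using the new test_base module.
from mistral.openstack.common.db.sqlalchemy import test_base


class MySQLConnectTestCase(test_base.MySQLOpportunisticTestCase):

    @test_base.backend_specific('mysql')
    def test_select_one(self):
        # self.engine is provided by DbFixture.setUp().
        self.assertEqual(1, self.engine.execute('SELECT 1').scalar())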
-import ConfigParser import functools +import logging import os +import subprocess import lockfile +from oslotest import base as test_base +from six import moves +from six.moves.urllib import parse import sqlalchemy import sqlalchemy.exc -from mistral.openstack.common.gettextutils import _ -from mistral.openstack.common import log as logging -from mistral.openstack.common import processutils -from mistral.openstack.common.py3kcompat import urlutils -from mistral.openstack.common import test +from mistral.openstack.common.db.sqlalchemy import utils +from mistral.openstack.common.gettextutils import _LE LOG = logging.getLogger(__name__) -def _get_connect_string(backend, user, passwd, database): - """Get database connection - - Try to get a connection with a very specific set of values, if we get - these then we'll run the tests, otherwise they are skipped - """ - if backend == "postgres": - backend = "postgresql+psycopg2" - elif backend == "mysql": - backend = "mysql+mysqldb" - else: - raise Exception("Unrecognized backend: '%s'" % backend) - - return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" - % {'backend': backend, 'user': user, 'passwd': passwd, - 'database': database}) - - -def _is_backend_avail(backend, user, passwd, database): - try: - connect_uri = _get_connect_string(backend, user, passwd, database) - engine = sqlalchemy.create_engine(connect_uri) - connection = engine.connect() - except Exception: - # intentionally catch all to handle exceptions even if we don't - # have any backend code loaded. - return False - else: - connection.close() - engine.dispose() - return True - - def _have_mysql(user, passwd, database): present = os.environ.get('TEST_MYSQL_PRESENT') if present is None: - return _is_backend_avail('mysql', user, passwd, database) + return utils.is_backend_avail(backend='mysql', + user=user, + passwd=passwd, + database=database) return present.lower() in ('', 'true') def _have_postgresql(user, passwd, database): present = os.environ.get('TEST_POSTGRESQL_PRESENT') if present is None: - return _is_backend_avail('postgres', user, passwd, database) + return utils.is_backend_avail(backend='postgres', + user=user, + passwd=passwd, + database=database) return present.lower() in ('', 'true') -def get_db_connection_info(conn_pieces): - database = conn_pieces.path.strip('/') - loc_pieces = conn_pieces.netloc.split('@') - host = loc_pieces[1] - - auth_pieces = loc_pieces[0].split(':') - user = auth_pieces[0] - password = "" - if len(auth_pieces) > 1: - password = auth_pieces[1].strip() - - return (user, password, database, host) - - def _set_db_lock(lock_path=None, lock_prefix=None): def decorator(f): @functools.wraps(f) @@ -100,15 +60,15 @@ def _set_db_lock(lock_path=None, lock_prefix=None): path = lock_path or os.environ.get("MISTRAL_LOCK_PATH") lock = lockfile.FileLock(os.path.join(path, lock_prefix)) with lock: - LOG.debug(_('Got lock "%s"') % f.__name__) + LOG.debug('Got lock "%s"' % f.__name__) return f(*args, **kwargs) finally: - LOG.debug(_('Lock released "%s"') % f.__name__) + LOG.debug('Lock released "%s"' % f.__name__) return wrapper return decorator -class BaseMigrationTestCase(test.BaseTestCase): +class BaseMigrationTestCase(test_base.BaseTestCase): """Base class for testing of migration utils.""" def __init__(self, *args, **kwargs): @@ -130,13 +90,13 @@ class BaseMigrationTestCase(test.BaseTestCase): # once. No need to re-run this on each test... 
LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH) if os.path.exists(self.CONFIG_FILE_PATH): - cp = ConfigParser.RawConfigParser() + cp = moves.configparser.RawConfigParser() try: cp.read(self.CONFIG_FILE_PATH) defaults = cp.defaults() for key, value in defaults.items(): self.test_databases[key] = value - except ConfigParser.ParsingError as e: + except moves.configparser.ParsingError as e: self.fail("Failed to read test_migrations.conf config " "file. Got error: %s" % e) else: @@ -158,15 +118,18 @@ class BaseMigrationTestCase(test.BaseTestCase): super(BaseMigrationTestCase, self).tearDown() def execute_cmd(self, cmd=None): - out, err = processutils.trycmd(cmd, shell=True, discard_warnings=True) - output = out or err + process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + output = process.communicate()[0] LOG.debug(output) - self.assertEqual('', err, + self.assertEqual(0, process.returncode, "Failed to run: %s\n%s" % (cmd, output)) - @_set_db_lock('pgadmin', 'tests-') def _reset_pg(self, conn_pieces): - (user, password, database, host) = get_db_connection_info(conn_pieces) + (user, + password, + database, + host) = utils.get_db_connection_info(conn_pieces) os.environ['PGPASSWORD'] = password os.environ['PGUSER'] = user # note(boris-42): We must create and drop database, we can't @@ -186,10 +149,11 @@ class BaseMigrationTestCase(test.BaseTestCase): os.unsetenv('PGPASSWORD') os.unsetenv('PGUSER') + @_set_db_lock(lock_prefix='migration_tests-') def _reset_databases(self): for key, engine in self.engines.items(): conn_string = self.test_databases[key] - conn_pieces = urlutils.urlparse(conn_string) + conn_pieces = parse.urlparse(conn_string) engine.dispose() if conn_string.startswith('sqlite'): # We can just delete the SQLite database, which is @@ -204,7 +168,7 @@ class BaseMigrationTestCase(test.BaseTestCase): # the MYSQL database, which is easier and less error-prone # than using SQLAlchemy to do this via MetaData...trust me. (user, password, database, host) = \ - get_db_connection_info(conn_pieces) + utils.get_db_connection_info(conn_pieces) sql = ("drop database if exists %(db)s; " "create database %(db)s;") % {'db': database} cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s " @@ -300,6 +264,6 @@ class WalkVersionsMixin(object): if check: check(engine, data) except Exception: - LOG.error("Failed to migrate to version %s on engine %s" % + LOG.error(_LE("Failed to migrate to version %s on engine %s") % (version, engine)) raise diff --git a/mistral/openstack/common/db/sqlalchemy/utils.py b/mistral/openstack/common/db/sqlalchemy/utils.py index f25ade90f..69e57eecc 100644 --- a/mistral/openstack/common/db/sqlalchemy/utils.py +++ b/mistral/openstack/common/db/sqlalchemy/utils.py @@ -16,9 +16,9 @@ # License for the specific language governing permissions and limitations # under the License. 
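# Editor's sketch (not part of the patch): what get_db_connection_info(),
# which this module now takes from utils (the helper is added to utils.py
# later in this diff), returns for the kind of URL _reset_databases() handles.
from six.moves.urllib import parse

from mistral.openstack.common.db.sqlalchemy import utils

pieces = parse.urlparse('mysql://citest:secret@localhost/testdb')
user, password, database, host = utils.get_db_connection_info(pieces)
# -> ('citest', 'secret', 'testdb', 'localhost')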
+import logging import re -from migrate.changeset import UniqueConstraint import sqlalchemy from sqlalchemy import Boolean from sqlalchemy import CheckConstraint @@ -29,16 +29,16 @@ from sqlalchemy import func from sqlalchemy import Index from sqlalchemy import Integer from sqlalchemy import MetaData +from sqlalchemy import or_ from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql.expression import UpdateBase -from sqlalchemy.sql import select from sqlalchemy import String from sqlalchemy import Table from sqlalchemy.types import NullType -from mistral.openstack.common.gettextutils import _ # noqa - -from mistral.openstack.common import log as logging +from mistral.openstack.common import context as request_context +from mistral.openstack.common.db.sqlalchemy import models +from mistral.openstack.common.gettextutils import _, _LI, _LW from mistral.openstack.common import timeutils @@ -94,7 +94,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None, if 'id' not in sort_keys: # TODO(justinsb): If this ever gives a false-positive, check # the actual primary key, rather than assuming its id - LOG.warn(_('Id not in sort_keys; is sort_keys unique?')) + LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?')) assert(not (sort_dir and sort_dirs)) @@ -133,9 +133,9 @@ def paginate_query(query, model, limit, sort_keys, marker=None, # Build up an array of sort criteria as in the docstring criteria_list = [] - for i in range(0, len(sort_keys)): + for i in range(len(sort_keys)): crit_attrs = [] - for j in range(0, i): + for j in range(i): model_attr = getattr(model, sort_keys[j]) crit_attrs.append((model_attr == marker_values[j])) @@ -157,11 +157,111 @@ def paginate_query(query, model, limit, sort_keys, marker=None, return query +def _read_deleted_filter(query, db_model, read_deleted): + if 'deleted' not in db_model.__table__.columns: + raise ValueError(_("There is no `deleted` column in `%s` table. " + "Project doesn't use soft-deleted feature.") + % db_model.__name__) + + default_deleted_value = db_model.__table__.c.deleted.default.arg + if read_deleted == 'no': + query = query.filter(db_model.deleted == default_deleted_value) + elif read_deleted == 'yes': + pass # omit the filter to include deleted and active + elif read_deleted == 'only': + query = query.filter(db_model.deleted != default_deleted_value) + else: + raise ValueError(_("Unrecognized read_deleted value '%s'") + % read_deleted) + return query + + +def _project_filter(query, db_model, context, project_only): + if project_only and 'project_id' not in db_model.__table__.columns: + raise ValueError(_("There is no `project_id` column in `%s` table.") + % db_model.__name__) + + if request_context.is_user_context(context) and project_only: + if project_only == 'allow_none': + is_none = None + query = query.filter(or_(db_model.project_id == context.project_id, + db_model.project_id == is_none)) + else: + query = query.filter(db_model.project_id == context.project_id) + + return query + + +def model_query(context, model, session, args=None, project_only=False, + read_deleted=None): + """Query helper that accounts for context's `read_deleted` field. + + :param context: context to query under + + :param model: Model to query. Must be a subclass of ModelBase. + :type model: models.ModelBase + + :param session: The session to use. + :type session: sqlalchemy.orm.session.Session + + :param args: Arguments to query. If None - model is used. 
+    :type args: tuple
+
+    :param project_only: If present and context is user-type, then restrict
+                         query to match the context's project_id. If set to
+                         'allow_none', restriction includes project_id = None.
+    :type project_only: bool
+
+    :param read_deleted: If present, overrides context's read_deleted field.
+    :type read_deleted: str, one of 'no', 'yes' or 'only'
+
+    Usage:
+
+    .. code:: python
+
+        result = (utils.model_query(context, models.Instance, session=session)
+                       .filter_by(uuid=instance_uuid)
+                       .all())
+
+        query = utils.model_query(
+            context, Node,
+            session=session,
+            args=(func.count(Node.id), func.sum(Node.ram))
+        ).filter_by(project_id=project_id)
+
+    """
+
+    if not read_deleted:
+        if hasattr(context, 'read_deleted'):
+            # NOTE(viktors): some projects use `read_deleted` attribute in
+            # their contexts instead of `show_deleted`.
+            read_deleted = context.read_deleted
+        else:
+            read_deleted = context.show_deleted
+
+    if not issubclass(model, models.ModelBase):
+        raise TypeError(_("model should be a subclass of ModelBase"))
+
+    query = session.query(model) if not args else session.query(*args)
+    query = _read_deleted_filter(query, model, read_deleted)
+    query = _project_filter(query, model, context, project_only)
+
+    return query
+
+
 def get_table(engine, name):
     """Returns an sqlalchemy table dynamically from db.
 
     Needed because the models don't work for us in migrations
     as models will be far out of sync with the current data.
+
+    .. warning::
+
+       Do not use this method when creating ForeignKeys in database migrations
+       because sqlalchemy needs the same MetaData object to hold information
+       about the parent table and the reference table in the ForeignKey. This
+       method uses a unique MetaData object per table object so it won't work
+       with ForeignKey creation.
     """
     metadata = MetaData()
     metadata.bind = engine
@@ -208,6 +308,10 @@ def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
                            **col_name_col_instance):
     """Drop unique constraint from table.
 
+    DEPRECATED: this function will be removed from mistral.db in a few
+    releases. Please use the UniqueConstraint.drop() method directly for
+    sqlalchemy-migrate migration scripts.
+
     This method drops UC from table and works for mysql, postgresql and sqlite.
     In mysql and postgresql we are able to use "alter table" construction.
     Sqlalchemy doesn't support some sqlite column types and replaces their
@@ -224,6 +328,8 @@ def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
     types by sqlite. For example BigInteger.
     """
 
+    from migrate.changeset import UniqueConstraint
+
     meta = MetaData()
     meta.bind = migrate_engine
     t = Table(table_name, meta, autoload=True)
@@ -263,9 +369,9 @@ def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
     columns_for_select = [func.max(table.c.id)]
     columns_for_select.extend(columns_for_group_by)
 
-    duplicated_rows_select = select(columns_for_select,
-                                    group_by=columns_for_group_by,
-                                    having=func.count(table.c.id) > 1)
+    duplicated_rows_select = sqlalchemy.sql.select(
+        columns_for_select, group_by=columns_for_group_by,
+        having=func.count(table.c.id) > 1)
 
     for row in migrate_engine.execute(duplicated_rows_select):
         # NOTE(boris-42): Do not remove row that has the biggest ID.
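# Editor's sketch (not part of the patch): the three read_deleted modes that
# model_query()/_read_deleted_filter() above accept. `context` and `session`
# are hypothetical stand-ins for real objects.
active = model_query(context, models.Instance, session=session,
                     read_deleted='no')     # exclude soft-deleted rows
everything = model_query(context, models.Instance, session=session,
                         read_deleted='yes')    # no deleted filter at all
trashed = model_query(context, models.Instance, session=session,
                      read_deleted='only')  # soft-deleted rows only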
@@ -275,10 +381,11 @@ def drop_old_duplicate_entries_from_table(migrate_engine, table_name, for name in uc_column_names: delete_condition &= table.c[name] == row[name] - rows_to_delete_select = select([table.c.id]).where(delete_condition) + rows_to_delete_select = sqlalchemy.sql.select( + [table.c.id]).where(delete_condition) for row in migrate_engine.execute(rows_to_delete_select).fetchall(): - LOG.info(_("Deleting duplicated row with id: %(id)s from table: " - "%(table)s") % dict(id=row[0], table=table_name)) + LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: " + "%(table)s") % dict(id=row[0], table=table_name)) if use_soft_delete: delete_statement = table.update().\ @@ -386,7 +493,7 @@ def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name, else: c_select.append(table.c.deleted == table.c.id) - ins = InsertFromSelect(new_table, select(c_select)) + ins = InsertFromSelect(new_table, sqlalchemy.sql.select(c_select)) migrate_engine.execute(ins) table.drop() @@ -497,3 +604,52 @@ def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name, where(new_table.c.deleted == deleted).\ values(deleted=default_deleted_value).\ execute() + + +def get_connect_string(backend, database, user=None, passwd=None): + """Get database connection + + Try to get a connection with a very specific set of values, if we get + these then we'll run the tests, otherwise they are skipped + """ + args = {'backend': backend, + 'user': user, + 'passwd': passwd, + 'database': database} + if backend == 'sqlite': + template = '%(backend)s:///%(database)s' + else: + template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" + return template % args + + +def is_backend_avail(backend, database, user=None, passwd=None): + try: + connect_uri = get_connect_string(backend=backend, + database=database, + user=user, + passwd=passwd) + engine = sqlalchemy.create_engine(connect_uri) + connection = engine.connect() + except Exception: + # intentionally catch all to handle exceptions even if we don't + # have any backend code loaded. + return False + else: + connection.close() + engine.dispose() + return True + + +def get_db_connection_info(conn_pieces): + database = conn_pieces.path.strip('/') + loc_pieces = conn_pieces.netloc.split('@') + host = loc_pieces[1] + + auth_pieces = loc_pieces[0].split(':') + user = auth_pieces[0] + password = "" + if len(auth_pieces) > 1: + password = auth_pieces[1].strip() + + return (user, password, database, host) diff --git a/mistral/openstack/common/exception.py b/mistral/openstack/common/exception.py deleted file mode 100644 index 7c773f9e6..000000000 --- a/mistral/openstack/common/exception.py +++ /dev/null @@ -1,139 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
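# Editor's sketch (not part of the patch): URLs produced by the
# get_connect_string() helper added to utils.py above. Unlike the removed
# _get_connect_string() in test_migrations.py, the backend name is used
# verbatim instead of being mapped to a specific DB-API driver.
get_connect_string('postgres', 'openstack_citest',
                   user='citest', passwd='secret')
# -> 'postgres://citest:secret@localhost/openstack_citest'
get_connect_string('sqlite', '/tmp/test.db')
# -> 'sqlite:////tmp/test.db'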
- -""" -Exceptions common to OpenStack projects -""" - -import logging - -from mistral.openstack.common.gettextutils import _ # noqa - -_FATAL_EXCEPTION_FORMAT_ERRORS = False - - -class Error(Exception): - def __init__(self, message=None): - super(Error, self).__init__(message) - - -class ApiError(Error): - def __init__(self, message='Unknown', code='Unknown'): - self.api_message = message - self.code = code - super(ApiError, self).__init__('%s: %s' % (code, message)) - - -class NotFound(Error): - pass - - -class UnknownScheme(Error): - - msg_fmt = "Unknown scheme '%s' found in URI" - - def __init__(self, scheme): - msg = self.msg_fmt % scheme - super(UnknownScheme, self).__init__(msg) - - -class BadStoreUri(Error): - - msg_fmt = "The Store URI %s was malformed. Reason: %s" - - def __init__(self, uri, reason): - msg = self.msg_fmt % (uri, reason) - super(BadStoreUri, self).__init__(msg) - - -class Duplicate(Error): - pass - - -class NotAuthorized(Error): - pass - - -class NotEmpty(Error): - pass - - -class Invalid(Error): - pass - - -class BadInputError(Exception): - """Error resulting from a client sending bad input to a server""" - pass - - -class MissingArgumentError(Error): - pass - - -class DatabaseMigrationError(Error): - pass - - -class ClientConnectionError(Exception): - """Error resulting from a client connecting to a server""" - pass - - -def wrap_exception(f): - def _wrap(*args, **kw): - try: - return f(*args, **kw) - except Exception as e: - if not isinstance(e, Error): - logging.exception(_('Uncaught exception')) - raise Error(str(e)) - raise - _wrap.func_name = f.func_name - return _wrap - - -class OpenstackException(Exception): - """Base Exception class. - - To correctly use this class, inherit from it and define - a 'msg_fmt' property. That message will get printf'd - with the keyword arguments provided to the constructor. - """ - msg_fmt = "An unknown exception occurred" - - def __init__(self, **kwargs): - try: - self._error_string = self.msg_fmt % kwargs - - except Exception: - if _FATAL_EXCEPTION_FORMAT_ERRORS: - raise - else: - # at least get the core message out if something happened - self._error_string = self.msg_fmt - - def __str__(self): - return self._error_string - - -class MalformedRequestBody(OpenstackException): - msg_fmt = "Malformed message body: %(reason)s" - - -class InvalidContentType(OpenstackException): - msg_fmt = "Invalid content type %(content_type)s" diff --git a/mistral/openstack/common/excutils.py b/mistral/openstack/common/excutils.py deleted file mode 100644 index 3aa232d30..000000000 --- a/mistral/openstack/common/excutils.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# Copyright 2012, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Exception related utilities. 
-""" - -import logging -import sys -import time -import traceback - -import six - -from mistral.openstack.common.gettextutils import _ # noqa - - -class save_and_reraise_exception(object): - """Save current exception, run some code and then re-raise. - - In some cases the exception context can be cleared, resulting in None - being attempted to be re-raised after an exception handler is run. This - can happen when eventlet switches greenthreads or when running an - exception handler, code raises and catches an exception. In both - cases the exception context will be cleared. - - To work around this, we save the exception state, run handler code, and - then re-raise the original exception. If another exception occurs, the - saved exception is logged and the new exception is re-raised. - - In some cases the caller may not want to re-raise the exception, and - for those circumstances this context provides a reraise flag that - can be used to suppress the exception. For example: - - except Exception: - with save_and_reraise_exception() as ctxt: - decide_if_need_reraise() - if not should_be_reraised: - ctxt.reraise = False - """ - def __init__(self): - self.reraise = True - - def __enter__(self): - self.type_, self.value, self.tb, = sys.exc_info() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if exc_type is not None: - logging.error(_('Original exception being dropped: %s'), - traceback.format_exception(self.type_, - self.value, - self.tb)) - return False - if self.reraise: - six.reraise(self.type_, self.value, self.tb) - - -def forever_retry_uncaught_exceptions(infunc): - def inner_func(*args, **kwargs): - last_log_time = 0 - last_exc_message = None - exc_count = 0 - while True: - try: - return infunc(*args, **kwargs) - except Exception as exc: - this_exc_message = six.u(str(exc)) - if this_exc_message == last_exc_message: - exc_count += 1 - else: - exc_count = 1 - # Do not log any more frequently than once a minute unless - # the exception message changes - cur_time = int(time.time()) - if (cur_time - last_log_time > 60 or - this_exc_message != last_exc_message): - logging.exception( - _('Unexpected exception occurred %d time(s)... ' - 'retrying.') % exc_count) - last_log_time = cur_time - last_exc_message = this_exc_message - exc_count = 0 - # This should be a very rare event. In case it isn't, do - # a sleep. - time.sleep(1) - return inner_func diff --git a/mistral/openstack/common/fileutils.py b/mistral/openstack/common/fileutils.py deleted file mode 100644 index c6515166e..000000000 --- a/mistral/openstack/common/fileutils.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import contextlib -import errno -import os -import tempfile - -from mistral.openstack.common import excutils -from mistral.openstack.common.gettextutils import _ # noqa -from mistral.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - -_FILE_CACHE = {} - - -def ensure_tree(path): - """Create a directory (and any ancestor directories required) - - :param path: Directory to create - """ - try: - os.makedirs(path) - except OSError as exc: - if exc.errno == errno.EEXIST: - if not os.path.isdir(path): - raise - else: - raise - - -def read_cached_file(filename, force_reload=False): - """Read from a file if it has been modified. - - :param force_reload: Whether to reload the file. - :returns: A tuple with a boolean specifying if the data is fresh - or not. - """ - global _FILE_CACHE - - if force_reload and filename in _FILE_CACHE: - del _FILE_CACHE[filename] - - reloaded = False - mtime = os.path.getmtime(filename) - cache_info = _FILE_CACHE.setdefault(filename, {}) - - if not cache_info or mtime > cache_info.get('mtime', 0): - LOG.debug(_("Reloading cached file %s") % filename) - with open(filename) as fap: - cache_info['data'] = fap.read() - cache_info['mtime'] = mtime - reloaded = True - return (reloaded, cache_info['data']) - - -def delete_if_exists(path, remove=os.unlink): - """Delete a file, but ignore file not found error. - - :param path: File to delete - :param remove: Optional function to remove passed path - """ - - try: - remove(path) - except OSError as e: - if e.errno != errno.ENOENT: - raise - - -@contextlib.contextmanager -def remove_path_on_error(path, remove=delete_if_exists): - """Protect code that wants to operate on PATH atomically. - Any exception will cause PATH to be removed. - - :param path: File to work with - :param remove: Optional function to remove passed path - """ - - try: - yield - except Exception: - with excutils.save_and_reraise_exception(): - remove(path) - - -def file_open(*args, **kwargs): - """Open file - - see built-in file() documentation for more details - - Note: The reason this is kept in a separate module is to easily - be able to provide a stub module that doesn't alter system - state at all (for unit tests) - """ - return file(*args, **kwargs) - - -def write_to_tempfile(content, path=None, suffix='', prefix='tmp'): - """Create temporary file or use existing file. - - This util is needed for creating temporary file with - specified content, suffix and prefix. If path is not None, - it will be used for writing content. If the path doesn't - exist it'll be created. - - :param content: content for temporary file. - :param path: same as parameter 'dir' for mkstemp - :param suffix: same as parameter 'suffix' for mkstemp - :param prefix: same as parameter 'prefix' for mkstemp - - For example: it can be used in database tests for creating - configuration files. 
- """ - if path: - ensure_tree(path) - - (fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix) - try: - os.write(fd, content) - finally: - os.close(fd) - return path diff --git a/mistral/openstack/common/gettextutils.py b/mistral/openstack/common/gettextutils.py index 293a47cc9..adf95408d 100644 --- a/mistral/openstack/common/gettextutils.py +++ b/mistral/openstack/common/gettextutils.py @@ -23,25 +23,122 @@ Usual usage in an openstack.common module: """ import copy +import functools import gettext -import logging +import locale +from logging import handlers import os -import re -try: - import UserString as _userString -except ImportError: - import collections as _userString from babel import localedata import six -_localedir = os.environ.get('mistral'.upper() + '_LOCALEDIR') -_t = gettext.translation('mistral', localedir=_localedir, fallback=True) - _AVAILABLE_LANGUAGES = {} + +# FIXME(dhellmann): Remove this when moving to oslo.i18n. USE_LAZY = False +class TranslatorFactory(object): + """Create translator functions + """ + + def __init__(self, domain, lazy=False, localedir=None): + """Establish a set of translation functions for the domain. + + :param domain: Name of translation domain, + specifying a message catalog. + :type domain: str + :param lazy: Delays translation until a message is emitted. + Defaults to False. + :type lazy: Boolean + :param localedir: Directory with translation catalogs. + :type localedir: str + """ + self.domain = domain + self.lazy = lazy + if localedir is None: + localedir = os.environ.get(domain.upper() + '_LOCALEDIR') + self.localedir = localedir + + def _make_translation_func(self, domain=None): + """Return a new translation function ready for use. + + Takes into account whether or not lazy translation is being + done. + + The domain can be specified to override the default from the + factory, but the localedir from the factory is always used + because we assume the log-level translation catalogs are + installed in the same directory as the main application + catalog. + + """ + if domain is None: + domain = self.domain + if self.lazy: + return functools.partial(Message, domain=domain) + t = gettext.translation( + domain, + localedir=self.localedir, + fallback=True, + ) + if six.PY3: + return t.gettext + return t.ugettext + + @property + def primary(self): + "The default translation function." + return self._make_translation_func() + + def _make_log_translation_func(self, level): + return self._make_translation_func(self.domain + '-log-' + level) + + @property + def log_info(self): + "Translate info-level log messages." + return self._make_log_translation_func('info') + + @property + def log_warning(self): + "Translate warning-level log messages." + return self._make_log_translation_func('warning') + + @property + def log_error(self): + "Translate error-level log messages." + return self._make_log_translation_func('error') + + @property + def log_critical(self): + "Translate critical-level log messages." + return self._make_log_translation_func('critical') + + +# NOTE(dhellmann): When this module moves out of the incubator into +# oslo.i18n, these global variables can be moved to an integration +# module within each application. + +# Create the global translation functions. +_translators = TranslatorFactory('mistral') + +# The primary translation function using the well-known name "_" +_ = _translators.primary + +# Translators for log levels. +# +# The abbreviated names are meant to reflect the usual use of a short +# name like '_'. 
The "L" is for "log" and the other letter comes from +# the level. +_LI = _translators.log_info +_LW = _translators.log_warning +_LE = _translators.log_error +_LC = _translators.log_critical + +# NOTE(dhellmann): End of globals that will move to the application's +# integration module. + + def enable_lazy(): """Convenience function for configuring _() to use lazy gettext @@ -50,19 +147,18 @@ def enable_lazy(): your project is importing _ directly instead of using the gettextutils.install() way of importing the _ function. """ - global USE_LAZY + # FIXME(dhellmann): This function will be removed in oslo.i18n, + # because the TranslatorFactory makes it superfluous. + global _, _LI, _LW, _LE, _LC, USE_LAZY + tf = TranslatorFactory('mistral', lazy=True) + _ = tf.primary + _LI = tf.log_info + _LW = tf.log_warning + _LE = tf.log_error + _LC = tf.log_critical USE_LAZY = True -def _(msg): - if USE_LAZY: - return Message(msg, 'mistral') - else: - if six.PY3: - return _t.gettext(msg) - return _t.ugettext(msg) - - def install(domain, lazy=False): """Install a _() function using the given translation domain. @@ -82,31 +178,9 @@ def install(domain, lazy=False): any available locale. """ if lazy: - # NOTE(mrodden): Lazy gettext functionality. - # - # The following introduces a deferred way to do translations on - # messages in OpenStack. We override the standard _() function - # and % (format string) operation to build Message objects that can - # later be translated when we have more information. - # - # Also included below is an example LocaleHandler that translates - # Messages to an associated locale, effectively allowing many logs, - # each with their own locale. - - def _lazy_gettext(msg): - """Create and return a Message object. - - Lazy gettext function for a given domain, it is a factory method - for a project/module to get a lazy gettext function for its own - translation domain (i.e. nova, glance, cinder, etc.) - - Message encapsulates a string so that we can translate - it later when needed. - """ - return Message(msg, domain) - from six import moves - moves.builtins.__dict__['_'] = _lazy_gettext + tf = TranslatorFactory(domain, lazy=True) + moves.builtins.__dict__['_'] = tf.primary else: localedir = '%s_LOCALEDIR' % domain.upper() if six.PY3: @@ -118,182 +192,145 @@ def install(domain, lazy=False): unicode=True) -class Message(_userString.UserString, object): - """Class used to encapsulate translatable messages.""" - def __init__(self, msg, domain): - # _msg is the gettext msgid and should never change - self._msg = msg - self._left_extra_msg = '' - self._right_extra_msg = '' - self._locale = None - self.params = None - self.domain = domain +class Message(six.text_type): + """A Message object is a unicode object that can be translated. - @property - def data(self): - # NOTE(mrodden): this should always resolve to a unicode string - # that best represents the state of the message currently + Translation of Message is done explicitly using the translate() method. + For all non-translation intents and purposes, a Message is simply unicode, + and can be treated as such. 
+ """ - localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR') - if self.locale: - lang = gettext.translation(self.domain, - localedir=localedir, - languages=[self.locale], - fallback=True) - else: - # use system locale for translations - lang = gettext.translation(self.domain, - localedir=localedir, - fallback=True) + def __new__(cls, msgid, msgtext=None, params=None, + domain='mistral', *args): + """Create a new Message object. + In order for translation to work gettext requires a message ID, this + msgid will be used as the base unicode text. It is also possible + for the msgid and the base unicode text to be different by passing + the msgtext parameter. + """ + # If the base msgtext is not given, we use the default translation + # of the msgid (which is in English) just in case the system locale is + # not English, so that the base text will be in that locale by default. + if not msgtext: + msgtext = Message._translate_msgid(msgid, domain) + # We want to initialize the parent unicode with the actual object that + # would have been plain unicode if 'Message' was not enabled. + msg = super(Message, cls).__new__(cls, msgtext) + msg.msgid = msgid + msg.domain = domain + msg.params = params + return msg + + def translate(self, desired_locale=None): + """Translate this message to the desired locale. + + :param desired_locale: The desired locale to translate the message to, + if no locale is provided the message will be + translated to the system's default locale. + + :returns: the translated message in unicode + """ + + translated_message = Message._translate_msgid(self.msgid, + self.domain, + desired_locale) + if self.params is None: + # No need for more translation + return translated_message + + # This Message object may have been formatted with one or more + # Message objects as substitution arguments, given either as a single + # argument, part of a tuple, or as one or more values in a dictionary. + # When translating this Message we need to translate those Messages too + translated_params = _translate_args(self.params, desired_locale) + + translated_message = translated_message % translated_params + + return translated_message + + @staticmethod + def _translate_msgid(msgid, domain, desired_locale=None): + if not desired_locale: + system_locale = locale.getdefaultlocale() + # If the system locale is not available to the runtime use English + if not system_locale[0]: + desired_locale = 'en_US' + else: + desired_locale = system_locale[0] + + locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR') + lang = gettext.translation(domain, + localedir=locale_dir, + languages=[desired_locale], + fallback=True) if six.PY3: - ugettext = lang.gettext + translator = lang.gettext else: - ugettext = lang.ugettext + translator = lang.ugettext - full_msg = (self._left_extra_msg + - ugettext(self._msg) + - self._right_extra_msg) - - if self.params is not None: - full_msg = full_msg % self.params - - return six.text_type(full_msg) - - @property - def locale(self): - return self._locale - - @locale.setter - def locale(self, value): - self._locale = value - if not self.params: - return - - # This Message object may have been constructed with one or more - # Message objects as substitution parameters, given as a single - # Message, or a tuple or Map containing some, so when setting the - # locale for this Message we need to set it for those Messages too. 
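# Editor's sketch (not part of the patch): behaviour of the new Message type
# defined above. Interpolating with % returns another Message that remembers
# its parameters, so it can be re-translated later; with no catalog installed,
# gettext simply falls back to the msgid.
msg = Message('Deleting duplicated row with id: %(id)s') % {'id': 42}
six.text_type(msg)     # u'Deleting duplicated row with id: 42'
msg.translate('es')    # re-translates the msgid, then re-applies the params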
- if isinstance(self.params, Message): - self.params.locale = value - return - if isinstance(self.params, tuple): - for param in self.params: - if isinstance(param, Message): - param.locale = value - return - if isinstance(self.params, dict): - for param in self.params.values(): - if isinstance(param, Message): - param.locale = value - - def _save_dictionary_parameter(self, dict_param): - full_msg = self.data - # look for %(blah) fields in string; - # ignore %% and deal with the - # case where % is first character on the line - keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', full_msg) - - # if we don't find any %(blah) blocks but have a %s - if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg): - # apparently the full dictionary is the parameter - params = copy.deepcopy(dict_param) - else: - params = {} - for key in keys: - try: - params[key] = copy.deepcopy(dict_param[key]) - except TypeError: - # cast uncopyable thing to unicode string - params[key] = six.text_type(dict_param[key]) - - return params - - def _save_parameters(self, other): - # we check for None later to see if - # we actually have parameters to inject, - # so encapsulate if our parameter is actually None - if other is None: - self.params = (other, ) - elif isinstance(other, dict): - self.params = self._save_dictionary_parameter(other) - else: - # fallback to casting to unicode, - # this will handle the problematic python code-like - # objects that cannot be deep-copied - try: - self.params = copy.deepcopy(other) - except TypeError: - self.params = six.text_type(other) - - return self - - # overrides to be more string-like - def __unicode__(self): - return self.data - - def __str__(self): - if six.PY3: - return self.__unicode__() - return self.data.encode('utf-8') - - def __getstate__(self): - to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg', - 'domain', 'params', '_locale'] - new_dict = self.__dict__.fromkeys(to_copy) - for attr in to_copy: - new_dict[attr] = copy.deepcopy(self.__dict__[attr]) - - return new_dict - - def __setstate__(self, state): - for (k, v) in state.items(): - setattr(self, k, v) - - # operator overloads - def __add__(self, other): - copied = copy.deepcopy(self) - copied._right_extra_msg += other.__str__() - return copied - - def __radd__(self, other): - copied = copy.deepcopy(self) - copied._left_extra_msg += other.__str__() - return copied + translated_message = translator(msgid) + return translated_message def __mod__(self, other): - # do a format string to catch and raise - # any possible KeyErrors from missing parameters - self.data % other - copied = copy.deepcopy(self) - return copied._save_parameters(other) + # When we mod a Message we want the actual operation to be performed + # by the parent class (i.e. unicode()), the only thing we do here is + # save the original msgid and the parameters in case of a translation + params = self._sanitize_mod_params(other) + unicode_mod = super(Message, self).__mod__(params) + modded = Message(self.msgid, + msgtext=unicode_mod, + params=params, + domain=self.domain) + return modded - def __mul__(self, other): - return self.data * other + def _sanitize_mod_params(self, other): + """Sanitize the object being modded with this Message. 
- def __rmul__(self, other): - return other * self.data - - def __getitem__(self, key): - return self.data[key] - - def __getslice__(self, start, end): - return self.data.__getslice__(start, end) - - def __getattribute__(self, name): - # NOTE(mrodden): handle lossy operations that we can't deal with yet - # These override the UserString implementation, since UserString - # uses our __class__ attribute to try and build a new message - # after running the inner data string through the operation. - # At that point, we have lost the gettext message id and can just - # safely resolve to a string instead. - ops = ['capitalize', 'center', 'decode', 'encode', - 'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip', - 'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill'] - if name in ops: - return getattr(self.data, name) + - Add support for modding 'None' so translation supports it + - Trim the modded object, which can be a large dictionary, to only + those keys that would actually be used in a translation + - Snapshot the object being modded, in case the message is + translated, it will be used as it was when the Message was created + """ + if other is None: + params = (other,) + elif isinstance(other, dict): + # Merge the dictionaries + # Copy each item in case one does not support deep copy. + params = {} + if isinstance(self.params, dict): + for key, val in self.params.items(): + params[key] = self._copy_param(val) + for key, val in other.items(): + params[key] = self._copy_param(val) else: - return _userString.UserString.__getattribute__(self, name) + params = self._copy_param(other) + return params + + def _copy_param(self, param): + try: + return copy.deepcopy(param) + except Exception: + # Fallback to casting to unicode this will handle the + # python code-like objects that can't be deep-copied + return six.text_type(param) + + def __add__(self, other): + msg = _('Message objects do not support addition.') + raise TypeError(msg) + + def __radd__(self, other): + return self.__add__(other) + + if six.PY2: + def __str__(self): + # NOTE(luisg): Logging in python 2.6 tries to str() log records, + # and it expects specifically a UnicodeError in order to proceed. + msg = _('Message objects do not support str() because they may ' + 'contain non-ascii characters. ' + 'Please use unicode() or translate() instead.') + raise UnicodeError(msg) def get_available_languages(domain): @@ -319,53 +356,143 @@ def get_available_languages(domain): list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() + for i in locale_identifiers: if find(i) is not None: language_list.append(i) + + # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported + # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they + # are perfectly legitimate locales: + # https://github.com/mitsuhiko/babel/issues/37 + # In Babel 1.3 they fixed the bug and they support these locales, but + # they are still not explicitly "listed" by locale_identifiers(). + # That is why we add the locales here explicitly if necessary so that + # they are listed as supported. 
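# (Editor's illustration: if Babel lists 'zh' as available for the domain but
# not 'zh_CN', the alias table below makes get_available_languages() return
# both identifiers.)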
+ aliases = {'zh': 'zh_CN', + 'zh_Hant_HK': 'zh_HK', + 'zh_Hant': 'zh_TW', + 'fil': 'tl_PH'} + for (locale, alias) in six.iteritems(aliases): + if locale in language_list and alias not in language_list: + language_list.append(alias) + _AVAILABLE_LANGUAGES[domain] = language_list return copy.copy(language_list) -def get_localized_message(message, user_locale): - """Gets a localized version of the given message in the given locale. +def translate(obj, desired_locale=None): + """Gets the translated unicode representation of the given object. - If the message is not a Message object the message is returned as-is. - If the locale is None the message is translated to the default locale. + If the object is not translatable it is returned as-is. + If the locale is None the object is translated to the system locale. - :returns: the translated message in unicode, or the original message if + :param obj: the object to translate + :param desired_locale: the locale to translate the message to, if None the + default system locale will be used + :returns: the translated object in unicode, or the original object if it could not be translated """ - translated = message + message = obj + if not isinstance(message, Message): + # If the object to translate is not already translatable, + # let's first get its unicode representation + message = six.text_type(obj) if isinstance(message, Message): - original_locale = message.locale - message.locale = user_locale - translated = six.text_type(message) - message.locale = original_locale - return translated + # Even after unicoding() we still need to check if we are + # running with translatable unicode before translating + return message.translate(desired_locale) + return obj -class LocaleHandler(logging.Handler): - """Handler that can have a locale associated to translate Messages. +def _translate_args(args, desired_locale=None): + """Translates all the translatable elements of the given arguments object. - A quick example of how to utilize the Message class above. - LocaleHandler takes a locale and a target logging.Handler object - to forward LogRecord objects to after translating the internal Message. + This method is used for translating the translatable values in method + arguments which include values of tuples or dictionaries. + If the object is not a tuple or a dictionary the object itself is + translated if it is translatable. + + If the locale is None the object is translated to the system locale. + + :param args: the args to translate + :param desired_locale: the locale to translate the args to, if None the + default system locale will be used + :returns: a new args object with the translated contents of the original + """ + if isinstance(args, tuple): + return tuple(translate(v, desired_locale) for v in args) + if isinstance(args, dict): + translated_dict = {} + for (k, v) in six.iteritems(args): + translated_v = translate(v, desired_locale) + translated_dict[k] = translated_v + return translated_dict + return translate(args, desired_locale) + + +class TranslationHandler(handlers.MemoryHandler): + """Handler that translates records before logging them. + + The TranslationHandler takes a locale and a target logging.Handler object + to forward LogRecord objects to after translating them. This handler + depends on Message objects being logged, instead of regular strings. 
+ + The handler can be configured declaratively in the logging.conf as follows: + + [handlers] + keys = translatedlog, translator + + [handler_translatedlog] + class = handlers.WatchedFileHandler + args = ('/var/log/api-localized.log',) + formatter = context + + [handler_translator] + class = openstack.common.log.TranslationHandler + target = translatedlog + args = ('zh_CN',) + + If the specified locale is not available in the system, the handler will + log in the default locale. """ - def __init__(self, locale, target): - """Initialize a LocaleHandler + def __init__(self, locale=None, target=None): + """Initialize a TranslationHandler :param locale: locale to use for translating messages :param target: logging.Handler object to forward LogRecord objects to after translation """ - logging.Handler.__init__(self) + # NOTE(luisg): In order to allow this handler to be a wrapper for + # other handlers, such as a FileHandler, and still be able to + # configure it using logging.conf, this handler has to extend + # MemoryHandler because only the MemoryHandlers' logging.conf + # parsing is implemented such that it accepts a target handler. + handlers.MemoryHandler.__init__(self, capacity=0, target=target) self.locale = locale - self.target = target + + def setFormatter(self, fmt): + self.target.setFormatter(fmt) def emit(self, record): - if isinstance(record.msg, Message): - # set the locale and resolve to a string - record.msg.locale = self.locale + # We save the message from the original record to restore it + # after translation, so other handlers are not affected by this + original_msg = record.msg + original_args = record.args + + try: + self._translate_and_log_record(record) + finally: + record.msg = original_msg + record.args = original_args + + def _translate_and_log_record(self, record): + record.msg = translate(record.msg, self.locale) + + # In addition to translating the message, we also need to translate + # arguments that were passed to the log method that were not part + # of the main message e.g., log.info(_('Some message %s'), this_one)) + record.args = _translate_args(record.args, self.locale) self.target.emit(record) diff --git a/mistral/openstack/common/importutils.py b/mistral/openstack/common/importutils.py index 4fd9ae2bc..97bdd9ec1 100644 --- a/mistral/openstack/common/importutils.py +++ b/mistral/openstack/common/importutils.py @@ -24,10 +24,10 @@ import traceback def import_class(import_str): """Returns a class from a string including module and class.""" mod_str, _sep, class_str = import_str.rpartition('.') + __import__(mod_str) try: - __import__(mod_str) return getattr(sys.modules[mod_str], class_str) - except (ValueError, AttributeError): + except AttributeError: raise ImportError('Class %s cannot be found (%s)' % (class_str, traceback.format_exception(*sys.exc_info()))) @@ -58,6 +58,13 @@ def import_module(import_str): return sys.modules[import_str] +def import_versioned_module(version, submodule=None): + module = 'mistral.v%s' % version + if submodule: + module = '.'.join((module, submodule)) + return import_module(module) + + def try_import(import_str, default=None): """Try to import a module and if it fails return default.""" try: diff --git a/mistral/openstack/common/jsonutils.py b/mistral/openstack/common/jsonutils.py index 47b9a9c05..91c3cacf3 100644 --- a/mistral/openstack/common/jsonutils.py +++ b/mistral/openstack/common/jsonutils.py @@ -31,21 +31,29 @@ This module provides a few things: ''' +import codecs import datetime import functools import inspect import 
itertools -import json -try: - import xmlrpclib -except ImportError: - # NOTE(jd): xmlrpclib is not shipped with Python 3 - xmlrpclib = None +import sys + +if sys.version_info < (2, 7): + # On Python <= 2.6, json module is not C boosted, so try to use + # simplejson module if available + try: + import simplejson as json + except ImportError: + import json +else: + import json import six +import six.moves.xmlrpc_client as xmlrpclib from mistral.openstack.common import gettextutils from mistral.openstack.common import importutils +from mistral.openstack.common import strutils from mistral.openstack.common import timeutils netaddr = importutils.try_import("netaddr") @@ -122,14 +130,14 @@ def to_primitive(value, convert_instances=False, convert_datetime=True, level=level, max_depth=max_depth) if isinstance(value, dict): - return dict((k, recursive(v)) for k, v in value.iteritems()) + return dict((k, recursive(v)) for k, v in six.iteritems(value)) elif isinstance(value, (list, tuple)): return [recursive(lv) for lv in value] # It's not clear why xmlrpclib created their own DateTime type, but # for our purposes, make it a datetime type which is explicitly # handled - if xmlrpclib and isinstance(value, xmlrpclib.DateTime): + if isinstance(value, xmlrpclib.DateTime): value = datetime.datetime(*tuple(value.timetuple())[:6]) if convert_datetime and isinstance(value, datetime.datetime): @@ -160,12 +168,12 @@ def dumps(value, default=to_primitive, **kwargs): return json.dumps(value, default=default, **kwargs) -def loads(s): - return json.loads(s) +def loads(s, encoding='utf-8'): + return json.loads(strutils.safe_decode(s, encoding)) -def load(s): - return json.load(s) +def load(fp, encoding='utf-8'): + return json.load(codecs.getreader(encoding)(fp)) try: diff --git a/mistral/openstack/common/lockutils.py b/mistral/openstack/common/lockutils.py deleted file mode 100644 index 4304dc2c3..000000000 --- a/mistral/openstack/common/lockutils.py +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
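# Editor's sketch (not part of the patch): the rewritten loads()/load() decode
# byte input explicitly before parsing; 'settings.json' is a hypothetical file.
data = loads(b'{"name": "caf\xc3\xa9"}')   # bytes are safe_decode()d first
with open('settings.json', 'rb') as fp:
    settings = load(fp, encoding='utf-8')  # fp gets wrapped in a codecs reader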
- - -import contextlib -import errno -import functools -import os -import shutil -import subprocess -import sys -import tempfile -import threading -import time -import weakref - -from oslo.config import cfg - -from mistral.openstack.common import fileutils -from mistral.openstack.common.gettextutils import _ # noqa -from mistral.openstack.common import local -from mistral.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - - -util_opts = [ - cfg.BoolOpt('disable_process_locking', default=False, - help='Whether to disable inter-process locks'), - cfg.StrOpt('lock_path', - default=os.environ.get("MISTRAL_LOCK_PATH"), - help=('Directory to use for lock files.')) -] - - -CONF = cfg.CONF -CONF.register_opts(util_opts) - - -def set_defaults(lock_path): - cfg.set_defaults(util_opts, lock_path=lock_path) - - -class _InterProcessLock(object): - """Lock implementation which allows multiple locks, working around - issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does - not require any cleanup. Since the lock is always held on a file - descriptor rather than outside of the process, the lock gets dropped - automatically if the process crashes, even if __exit__ is not executed. - - There are no guarantees regarding usage by multiple green threads in a - single process here. This lock works only between processes. Exclusive - access between local threads should be achieved using the semaphores - in the @synchronized decorator. - - Note these locks are released when the descriptor is closed, so it's not - safe to close the file descriptor while another green thread holds the - lock. Just opening and closing the lock file can break synchronisation, - so lock files must be accessed only using this abstraction. - """ - - def __init__(self, name): - self.lockfile = None - self.fname = name - - def __enter__(self): - self.lockfile = open(self.fname, 'w') - - while True: - try: - # Using non-blocking locks since green threads are not - # patched to deal with blocking locking calls. - # Also upon reading the MSDN docs for locking(), it seems - # to have a laughable 10 attempts "blocking" mechanism. 
- self.trylock() - return self - except IOError as e: - if e.errno in (errno.EACCES, errno.EAGAIN): - # external locks synchronise things like iptables - # updates - give it some time to prevent busy spinning - time.sleep(0.01) - else: - raise - - def __exit__(self, exc_type, exc_val, exc_tb): - try: - self.unlock() - self.lockfile.close() - except IOError: - LOG.exception(_("Could not release the acquired lock `%s`"), - self.fname) - - def trylock(self): - raise NotImplementedError() - - def unlock(self): - raise NotImplementedError() - - -class _WindowsLock(_InterProcessLock): - def trylock(self): - msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1) - - def unlock(self): - msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1) - - -class _PosixLock(_InterProcessLock): - def trylock(self): - fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) - - def unlock(self): - fcntl.lockf(self.lockfile, fcntl.LOCK_UN) - - -if os.name == 'nt': - import msvcrt - InterProcessLock = _WindowsLock -else: - import fcntl - InterProcessLock = _PosixLock - -_semaphores = weakref.WeakValueDictionary() -_semaphores_lock = threading.Lock() - - -@contextlib.contextmanager -def lock(name, lock_file_prefix=None, external=False, lock_path=None): - """Context based lock - - This function yields a `threading.Semaphore` instance (if we don't use - eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is - True, in which case, it'll yield an InterProcessLock instance. - - :param lock_file_prefix: The lock_file_prefix argument is used to provide - lock files on disk with a meaningful prefix. - - :param external: The external keyword argument denotes whether this lock - should work across multiple processes. This means that if two different - workers both run a a method decorated with @synchronized('mylock', - external=True), only one of them will execute at a time. - - :param lock_path: The lock_path keyword argument is used to specify a - special location for external lock files to live. If nothing is set, then - CONF.lock_path is used as a default. 
- """ - with _semaphores_lock: - try: - sem = _semaphores[name] - except KeyError: - sem = threading.Semaphore() - _semaphores[name] = sem - - with sem: - LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name}) - - # NOTE(mikal): I know this looks odd - if not hasattr(local.strong_store, 'locks_held'): - local.strong_store.locks_held = [] - local.strong_store.locks_held.append(name) - - try: - if external and not CONF.disable_process_locking: - LOG.debug(_('Attempting to grab file lock "%(lock)s"'), - {'lock': name}) - - # We need a copy of lock_path because it is non-local - local_lock_path = lock_path or CONF.lock_path - if not local_lock_path: - raise cfg.RequiredOptError('lock_path') - - if not os.path.exists(local_lock_path): - fileutils.ensure_tree(local_lock_path) - LOG.info(_('Created lock path: %s'), local_lock_path) - - def add_prefix(name, prefix): - if not prefix: - return name - sep = '' if prefix.endswith('-') else '-' - return '%s%s%s' % (prefix, sep, name) - - # NOTE(mikal): the lock name cannot contain directory - # separators - lock_file_name = add_prefix(name.replace(os.sep, '_'), - lock_file_prefix) - - lock_file_path = os.path.join(local_lock_path, lock_file_name) - - try: - lock = InterProcessLock(lock_file_path) - with lock as lock: - LOG.debug(_('Got file lock "%(lock)s" at %(path)s'), - {'lock': name, 'path': lock_file_path}) - yield lock - finally: - LOG.debug(_('Released file lock "%(lock)s" at %(path)s'), - {'lock': name, 'path': lock_file_path}) - else: - yield sem - - finally: - local.strong_store.locks_held.remove(name) - - -def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): - """Synchronization decorator. - - Decorating a method like so:: - - @synchronized('mylock') - def foo(self, *args): - ... - - ensures that only one thread will execute the foo method at a time. - - Different methods can share the same lock:: - - @synchronized('mylock') - def foo(self, *args): - ... - - @synchronized('mylock') - def bar(self, *args): - ... - - This way only one of either foo or bar can be executing at a time. - """ - - def wrap(f): - @functools.wraps(f) - def inner(*args, **kwargs): - try: - with lock(name, lock_file_prefix, external, lock_path): - LOG.debug(_('Got semaphore / lock "%(function)s"'), - {'function': f.__name__}) - return f(*args, **kwargs) - finally: - LOG.debug(_('Semaphore / lock released "%(function)s"'), - {'function': f.__name__}) - return inner - return wrap - - -def synchronized_with_prefix(lock_file_prefix): - """Partial object generator for the synchronization decorator. - - Redefine @synchronized in each project like so:: - - (in nova/utils.py) - from nova.openstack.common import lockutils - - synchronized = lockutils.synchronized_with_prefix('nova-') - - - (in nova/foo.py) - from nova import utils - - @utils.synchronized('mylock') - def bar(self, *args): - ... - - The lock_file_prefix argument is used to provide lock files on disk with a - meaningful prefix. - """ - - return functools.partial(synchronized, lock_file_prefix=lock_file_prefix) - - -def main(argv): - """Create a dir for locks and pass it to command from arguments - - If you run this: - python -m openstack.common.lockutils python setup.py testr - - a temporary directory will be created for all your locks and passed to all - your tests in an environment variable. The temporary dir will be deleted - afterwards and the return value will be preserved. 
-    """
-
-    lock_dir = tempfile.mkdtemp()
-    os.environ["MISTRAL_LOCK_PATH"] = lock_dir
-    try:
-        ret_val = subprocess.call(argv[1:])
-    finally:
-        shutil.rmtree(lock_dir, ignore_errors=True)
-    return ret_val
-
-
-if __name__ == '__main__':
-    sys.exit(main(sys.argv))
diff --git a/mistral/openstack/common/log.py b/mistral/openstack/common/log.py
index a80540d17..02d5676e7 100644
--- a/mistral/openstack/common/log.py
+++ b/mistral/openstack/common/log.py
@@ -15,7 +15,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-"""Openstack logging handler.
+"""OpenStack logging handler.
 
 This module adds to logging functionality by adding the option to specify
 a context object when calling the various log methods.  If the context object
@@ -41,7 +41,7 @@ from oslo.config import cfg
 import six
 from six import moves
 
-from mistral.openstack.common.gettextutils import _  # noqa
+from mistral.openstack.common.gettextutils import _
 from mistral.openstack.common import importutils
 from mistral.openstack.common import jsonutils
 from mistral.openstack.common import local
@@ -59,7 +59,10 @@ _SANITIZE_PATTERNS = []
 _FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
                     r'(<%(key)s>).*?(</%(key)s>)',
                     r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
-                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
+                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
+                    r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])'
+                    '.*?([\'"])',
+                    r'(%(key)s\s*--?[A-z]+\s*).*?([\s])']
 
 for key in _SANITIZE_KEYS:
     for pattern in _FORMAT_PATTERNS:
@@ -84,14 +87,11 @@ logging_cli_opts = [
     cfg.StrOpt('log-config-append',
                metavar='PATH',
                deprecated_name='log-config',
-               help='The name of logging configuration file. It does not '
-                    'disable existing loggers, but just appends specified '
-                    'logging configuration to any other existing logging '
-                    'options. Please see the Python logging module '
-                    'documentation for details on logging configuration '
-                    'files.'),
+               help='The name of a logging configuration file. This file '
+                    'is appended to any existing logging configuration '
+                    'files. For details about logging configuration files, '
+                    'see the Python logging module documentation.'),
     cfg.StrOpt('log-format',
-               default=None,
                metavar='FORMAT',
                help='DEPRECATED. '
                     'A logging.Formatter log message format string which may '
@@ -103,7 +103,7 @@ logging_cli_opts = [
                default=_DEFAULT_LOG_DATE_FORMAT,
                metavar='DATE_FORMAT',
                help='Format string for %%(asctime)s in log records. '
-                    'Default: %(default)s'),
+                    'Default: %(default)s .'),
     cfg.StrOpt('log-file',
                metavar='PATH',
                deprecated_name='logfile',
@@ -112,68 +112,80 @@ logging_cli_opts = [
     cfg.StrOpt('log-dir',
                deprecated_name='logdir',
                help='(Optional) The base directory used for relative '
-                    '--log-file paths'),
+                    '--log-file paths.'),
     cfg.BoolOpt('use-syslog',
                 default=False,
-                help='Use syslog for logging.'),
+                help='Use syslog for logging. '
+                     'Existing syslog format is DEPRECATED during I, '
+                     'and will change in J to honor RFC5424.'),
+    cfg.BoolOpt('use-syslog-rfc-format',
+                # TODO(bogdando) remove or use True after existing
+                # syslog format deprecation in J
+                default=False,
+                help='(Optional) Enables or disables syslog rfc5424 format '
+                     'for logging. If enabled, prefixes the MSG part of the '
+                     'syslog message with APP-NAME (RFC5424). 
The ' + 'format without the APP-NAME is deprecated in I, ' + 'and will be removed in J.'), cfg.StrOpt('syslog-log-facility', default='LOG_USER', - help='syslog facility to receive log lines') + help='Syslog facility to receive log lines.') ] generic_log_opts = [ cfg.BoolOpt('use_stderr', default=True, - help='Log output to standard error') + help='Log output to standard error.') ] log_opts = [ cfg.StrOpt('logging_context_format_string', default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [%(request_id)s %(user)s %(tenant)s] ' + '%(name)s [%(request_id)s %(user_identity)s] ' '%(instance)s%(message)s', - help='format string to use for log messages with context'), + help='Format string to use for log messages with context.'), cfg.StrOpt('logging_default_format_string', default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' '%(name)s [-] %(instance)s%(message)s', - help='format string to use for log messages without context'), + help='Format string to use for log messages without context.'), cfg.StrOpt('logging_debug_format_suffix', default='%(funcName)s %(pathname)s:%(lineno)d', - help='data to append to log format when level is DEBUG'), + help='Data to append to log format when level is DEBUG.'), cfg.StrOpt('logging_exception_prefix', default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' '%(instance)s', - help='prefix each line of exception output with this format'), + help='Prefix each line of exception output with this format.'), cfg.ListOpt('default_log_levels', default=[ 'amqp=WARN', 'amqplib=WARN', 'boto=WARN', - 'keystone=INFO', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', + 'oslo.messaging=INFO', 'iso8601=WARN', + 'requests.packages.urllib3.connectionpool=WARN' ], - help='list of logger=LEVEL pairs'), + help='List of logger=LEVEL pairs.'), cfg.BoolOpt('publish_errors', default=False, - help='publish error events'), + help='Enables or disables publication of error events.'), cfg.BoolOpt('fatal_deprecations', default=False, - help='make deprecations fatal'), + help='Enables or disables fatal status of deprecations.'), # NOTE(mikal): there are two options here because sometimes we are handed # a full instance (and could include more information), and other times we # are just handed a UUID for the instance. cfg.StrOpt('instance_format', default='[instance: %(uuid)s] ', - help='If an instance is passed with the log message, format ' - 'it like this'), + help='The format for an instance that is passed with the log ' + 'message. '), cfg.StrOpt('instance_uuid_format', default='[instance: %(uuid)s] ', - help='If an instance UUID is passed with the log message, ' - 'format it like this'), + help='The format for an instance UUID that is passed with the ' + 'log message. '), ] CONF = cfg.CONF @@ -236,10 +248,11 @@ def mask_password(message, secret="***"): """Replace password with 'secret' in message. :param message: The string which includes security information. - :param secret: value with which to replace passwords, defaults to "***". + :param secret: value with which to replace passwords. :returns: The unicode value of message with the password fields masked. 
For example: + >>> mask_password("'adminPass' : 'aaaaa'") "'adminPass' : '***'" >>> mask_password("'admin_pass' : 'aaaaa'") @@ -292,18 +305,39 @@ class ContextAdapter(BaseLoggerAdapter): self.logger = logger self.project = project_name self.version = version_string + self._deprecated_messages_sent = dict() @property def handlers(self): return self.logger.handlers def deprecated(self, msg, *args, **kwargs): + """Call this method when a deprecated feature is used. + + If the system is configured for fatal deprecations then the message + is logged at the 'critical' level and :class:`DeprecatedConfig` will + be raised. + + Otherwise, the message will be logged (once) at the 'warn' level. + + :raises: :class:`DeprecatedConfig` if the system is configured for + fatal deprecations. + + """ stdmsg = _("Deprecated: %s") % msg if CONF.fatal_deprecations: self.critical(stdmsg, *args, **kwargs) raise DeprecatedConfig(msg=stdmsg) - else: - self.warn(stdmsg, *args, **kwargs) + + # Using a list because a tuple with dict can't be stored in a set. + sent_args = self._deprecated_messages_sent.setdefault(msg, list()) + + if args in sent_args: + # Already logged this message, so don't log it again. + return + + sent_args.append(args) + self.warn(stdmsg, *args, **kwargs) def process(self, msg, kwargs): # NOTE(mrodden): catch any Message/other object and @@ -324,7 +358,7 @@ class ContextAdapter(BaseLoggerAdapter): extra.update(_dictify_context(context)) instance = kwargs.pop('instance', None) - instance_uuid = (extra.get('instance_uuid', None) or + instance_uuid = (extra.get('instance_uuid') or kwargs.pop('instance_uuid', None)) instance_extra = '' if instance: @@ -332,10 +366,12 @@ class ContextAdapter(BaseLoggerAdapter): elif instance_uuid: instance_extra = (CONF.instance_uuid_format % {'uuid': instance_uuid}) - extra.update({'instance': instance_extra}) + extra['instance'] = instance_extra - extra.update({"project": self.project}) - extra.update({"version": self.version}) + extra.setdefault('user_identity', kwargs.pop('user_identity', None)) + + extra['project'] = self.project + extra['version'] = self.version extra['extra'] = extra.copy() return msg, kwargs @@ -349,7 +385,7 @@ class JSONFormatter(logging.Formatter): def formatException(self, ei, strip_newlines=True): lines = traceback.format_exception(*ei) if strip_newlines: - lines = [itertools.ifilter( + lines = [moves.filter( lambda x: x, line.rstrip().splitlines()) for line in lines] lines = list(itertools.chain(*lines)) @@ -389,9 +425,11 @@ class JSONFormatter(logging.Formatter): def _create_logging_excepthook(product_name): def logging_excepthook(exc_type, value, tb): extra = {} - if CONF.verbose: + if CONF.verbose or CONF.debug: extra['exc_info'] = (exc_type, value, tb) - getLogger(product_name).critical(str(value), **extra) + getLogger(product_name).critical( + "".join(traceback.format_exception_only(exc_type, value)), + **extra) return logging_excepthook @@ -413,15 +451,15 @@ def _load_log_config(log_config_append): logging.config.fileConfig(log_config_append, disable_existing_loggers=False) except moves.configparser.Error as exc: - raise LogConfigError(log_config_append, str(exc)) + raise LogConfigError(log_config_append, six.text_type(exc)) -def setup(product_name): +def setup(product_name, version='unknown'): """Setup logging.""" if CONF.log_config_append: _load_log_config(CONF.log_config_append) else: - _setup_logging_from_conf() + _setup_logging_from_conf(product_name, version) sys.excepthook = _create_logging_excepthook(product_name) @@ 
-455,15 +493,38 @@ def _find_facility_from_conf(): return facility -def _setup_logging_from_conf(): +class RFCSysLogHandler(logging.handlers.SysLogHandler): + def __init__(self, *args, **kwargs): + self.binary_name = _get_binary_name() + # Do not use super() unless type(logging.handlers.SysLogHandler) + # is 'type' (Python 2.7). + # Use old style calls, if the type is 'classobj' (Python 2.6) + logging.handlers.SysLogHandler.__init__(self, *args, **kwargs) + + def format(self, record): + # Do not use super() unless type(logging.handlers.SysLogHandler) + # is 'type' (Python 2.7). + # Use old style calls, if the type is 'classobj' (Python 2.6) + msg = logging.handlers.SysLogHandler.format(self, record) + msg = self.binary_name + ' ' + msg + return msg + + +def _setup_logging_from_conf(project, version): log_root = getLogger(None).logger for handler in log_root.handlers: log_root.removeHandler(handler) if CONF.use_syslog: facility = _find_facility_from_conf() - syslog = logging.handlers.SysLogHandler(address='/dev/log', - facility=facility) + # TODO(bogdando) use the format provided by RFCSysLogHandler + # after existing syslog format deprecation in J + if CONF.use_syslog_rfc_format: + syslog = RFCSysLogHandler(address='/dev/log', + facility=facility) + else: + syslog = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) log_root.addHandler(syslog) logpath = _get_log_file_path() @@ -497,7 +558,9 @@ def _setup_logging_from_conf(): log_root.info('Deprecated: log_format is now deprecated and will ' 'be removed in the next release') else: - handler.setFormatter(ContextFormatter(datefmt=datefmt)) + handler.setFormatter(ContextFormatter(project=project, + version=version, + datefmt=datefmt)) if CONF.debug: log_root.setLevel(logging.DEBUG) @@ -508,9 +571,15 @@ def _setup_logging_from_conf(): for pair in CONF.default_log_levels: mod, _sep, level_name = pair.partition('=') - level = logging.getLevelName(level_name) logger = logging.getLogger(mod) - logger.setLevel(level) + # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name + # to integer code. + if sys.version_info < (2, 7): + level = logging.getLevelName(level_name) + logger.setLevel(level) + else: + logger.setLevel(level_name) + _loggers = {} @@ -541,7 +610,7 @@ class WritableLogger(object): self.level = level def write(self, msg): - self.logger.log(self.level, msg) + self.logger.log(self.level, msg.rstrip()) class ContextFormatter(logging.Formatter): @@ -555,18 +624,50 @@ class ContextFormatter(logging.Formatter): For information about what variables are available for the formatter see: http://docs.python.org/library/logging.html#formatter + If available, uses the context value stored in TLS - local.store.context + """ + def __init__(self, *args, **kwargs): + """Initialize ContextFormatter instance + + Takes additional keyword arguments which can be used in the message + format string. 
+ + :keyword project: project name + :type project: string + :keyword version: project version + :type version: string + + """ + + self.project = kwargs.pop('project', 'unknown') + self.version = kwargs.pop('version', 'unknown') + + logging.Formatter.__init__(self, *args, **kwargs) + def format(self, record): """Uses contextstring if request_id is set, otherwise default.""" - # NOTE(sdague): default the fancier formating params + + # store project info + record.project = self.project + record.version = self.version + + # store request info + context = getattr(local.store, 'context', None) + if context: + d = _dictify_context(context) + for k, v in d.items(): + setattr(record, k, v) + + # NOTE(sdague): default the fancier formatting params # to an empty string so we don't throw an exception if # they get used - for key in ('instance', 'color'): + for key in ('instance', 'color', 'user_identity'): if key not in record.__dict__: record.__dict__[key] = '' - if record.__dict__.get('request_id', None): + if record.__dict__.get('request_id'): self._fmt = CONF.logging_context_format_string else: self._fmt = CONF.logging_default_format_string @@ -575,7 +676,7 @@ class ContextFormatter(logging.Formatter): CONF.logging_debug_format_suffix): self._fmt += " " + CONF.logging_debug_format_suffix - # Cache this on the record, Logger will respect our formated copy + # Cache this on the record, Logger will respect our formatted copy if record.exc_info: record.exc_text = self.formatException(record.exc_info, record) return logging.Formatter.format(self, record) diff --git a/mistral/openstack/common/loopingcall.py b/mistral/openstack/common/loopingcall.py index 5f5f5e5da..876560222 100644 --- a/mistral/openstack/common/loopingcall.py +++ b/mistral/openstack/common/loopingcall.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara @@ -22,7 +20,7 @@ import sys from eventlet import event from eventlet import greenthread -from mistral.openstack.common.gettextutils import _ # noqa +from mistral.openstack.common.gettextutils import _LE, _LW from mistral.openstack.common import log as logging from mistral.openstack.common import timeutils @@ -30,19 +28,19 @@ LOG = logging.getLogger(__name__) class LoopingCallDone(Exception): - """Exception to break out and stop a LoopingCall. + """Exception to break out and stop a LoopingCallBase. - The poll-function passed to LoopingCall can raise this exception to + The poll-function passed to LoopingCallBase can raise this exception to break out of the loop normally. This is somewhat analogous to StopIteration. 
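(Review note: to make the LoopingCallDone contract concrete, here is a small hand-rolled analogue of the fixed-interval loop. It uses plain time.sleep instead of eventlet and is illustrative only, not the oslo class.)

```python
import time


class LoopingCallDone(Exception):
    """Raised by the poll function to stop the loop and carry a result."""

    def __init__(self, retvalue=True):
        self.retvalue = retvalue


def run_fixed_interval(poll_fn, interval):
    # Call poll_fn roughly every `interval` seconds until it raises
    # LoopingCallDone; return the value carried by the exception, which is
    # what LoopingCallBase.wait() hands back in the real class.
    while True:
        start = time.time()
        try:
            poll_fn()
        except LoopingCallDone as e:
            return e.retvalue
        delay = interval - (time.time() - start)
        if delay <= 0:
            print('task run outlasted interval by %s sec' % -delay)
        time.sleep(delay if delay > 0 else 0)


if __name__ == '__main__':
    calls = {'n': 0}

    def poll():
        calls['n'] += 1
        if calls['n'] == 3:
            raise LoopingCallDone(retvalue=calls['n'])

    print(run_fixed_interval(poll, 0.05))  # prints 3 after two short sleeps
```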
An optional return-value can be included as the argument to the exception; - this return-value will be returned by LoopingCall.wait() + this return-value will be returned by LoopingCallBase.wait() """ def __init__(self, retvalue=True): - """:param retvalue: Value that LoopingCall.wait() should return.""" + """:param retvalue: Value that LoopingCallBase.wait() should return.""" self.retvalue = retvalue @@ -81,14 +79,14 @@ class FixedIntervalLoopingCall(LoopingCallBase): break delay = interval - timeutils.delta_seconds(start, end) if delay <= 0: - LOG.warn(_('task run outlasted interval by %s sec') % + LOG.warn(_LW('task run outlasted interval by %s sec') % -delay) greenthread.sleep(delay if delay > 0 else 0) except LoopingCallDone as e: self.stop() done.send(e.retvalue) except Exception: - LOG.exception(_('in fixed duration looping call')) + LOG.exception(_LE('in fixed duration looping call')) done.send_exception(*sys.exc_info()) return else: @@ -100,11 +98,6 @@ class FixedIntervalLoopingCall(LoopingCallBase): return self.done -# TODO(mikal): this class name is deprecated in Havana and should be removed -# in the I release -LoopingCall = FixedIntervalLoopingCall - - class DynamicLoopingCall(LoopingCallBase): """A looping call which sleeps until the next known event. @@ -117,7 +110,6 @@ class DynamicLoopingCall(LoopingCallBase): done = event.Event() def _inner(): - if initial_delay: greenthread.sleep(initial_delay) @@ -129,19 +121,20 @@ class DynamicLoopingCall(LoopingCallBase): if periodic_interval_max is not None: idle = min(idle, periodic_interval_max) - LOG.debug(_('Dynamic looping call sleeping for %.02f ' - 'seconds'), idle) + LOG.debug('Dynamic looping call sleeping for %.02f ' + 'seconds', idle) greenthread.sleep(idle) except LoopingCallDone as e: self.stop() done.send(e.retvalue) except Exception: - LOG.exception(_('in dynamic looping call')) + LOG.exception(_LE('in dynamic looping call')) done.send_exception(*sys.exc_info()) return else: done.send(True) self.done = done + greenthread.spawn(_inner) return self.done diff --git a/mistral/openstack/common/periodic_task.py b/mistral/openstack/common/periodic_task.py index 50c25950e..ec0255269 100644 --- a/mistral/openstack/common/periodic_task.py +++ b/mistral/openstack/common/periodic_task.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -13,21 +11,20 @@ # License for the specific language governing permissions and limitations # under the License. -import datetime import time from oslo.config import cfg +import six -from mistral.openstack.common.gettextutils import _ # noqa +from mistral.openstack.common.gettextutils import _, _LE, _LI from mistral.openstack.common import log as logging -from mistral.openstack.common import timeutils periodic_opts = [ cfg.BoolOpt('run_external_periodic_tasks', default=True, - help=('Some periodic tasks can be run in a separate process. ' - 'Should we run them here?')), + help='Some periodic tasks can be run in a separate process. ' + 'Should we run them here?'), ] CONF = cfg.CONF @@ -47,8 +44,8 @@ def periodic_task(*args, **kwargs): This decorator can be used in two ways: - 1. Without arguments '@periodic_task', this will be run on every cycle - of the periodic scheduler. + 1. Without arguments '@periodic_task', this will be run on the default + interval of 60 seconds. 2. 
With arguments: @periodic_task(spacing=N [, run_immediately=[True|False]]) @@ -79,18 +76,18 @@ if f._periodic_immediate: f._periodic_last_run = None else: - f._periodic_last_run = timeutils.utcnow() + f._periodic_last_run = time.time() return f # NOTE(sirp): The `if` is necessary to allow the decorator to be used with - # and without parens. + # and without parenthesis. # - # In the 'with-parens' case (with kwargs present), this function needs to + # In the 'with-parenthesis' case (with kwargs present), this function needs to # return a decorator function since the interpreter will invoke it like: # # periodic_task(*args, **kwargs)(f) # - # In the 'without-parens' case, the original function will be passed + # In the 'without-parenthesis' case, the original function will be passed # in as the first argument, like: # # periodic_task(f) @@ -114,11 +111,6 @@ class _PeriodicTasksMeta(type): except AttributeError: cls._periodic_tasks = [] - try: - cls._periodic_last_run = cls._periodic_last_run.copy() - except AttributeError: - cls._periodic_last_run = {} - try: cls._periodic_spacing = cls._periodic_spacing.copy() except AttributeError: @@ -130,28 +122,33 @@ name = task.__name__ if task._periodic_spacing < 0: - LOG.info(_('Skipping periodic task %(task)s because ' - 'its interval is negative'), + LOG.info(_LI('Skipping periodic task %(task)s because ' + 'its interval is negative'), {'task': name}) continue if not task._periodic_enabled: - LOG.info(_('Skipping periodic task %(task)s because ' - 'it is disabled'), + LOG.info(_LI('Skipping periodic task %(task)s because ' + 'it is disabled'), {'task': name}) continue # A periodic spacing of zero indicates that this task should - # be run every pass + # be run on the default interval to avoid running too + # frequently. 
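(Review note: the with/without-parenthesis comment above describes the trickiest part of this decorator. A toy re-creation of just that dual-use dispatch, not the oslo code:)

```python
def periodic_task(*args, **kwargs):
    # Toy version of the dual-use dispatch: tag the function either way.
    def decorator(f):
        f._periodic_task = True
        f._periodic_spacing = kwargs.get('spacing', 0)
        return f

    if kwargs:
        # 'with-parenthesis' case: invoked as periodic_task(spacing=N)(f).
        return decorator
    else:
        # 'without-parenthesis' case: invoked as periodic_task(f).
        return decorator(args[0])


@periodic_task
def heartbeat():
    pass


@periodic_task(spacing=30)
def sync_state():
    pass


print(heartbeat._periodic_spacing)   # 0; the real metaclass remaps this to the 60s default
print(sync_state._periodic_spacing)  # 30
```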
if task._periodic_spacing == 0: - task._periodic_spacing = None + task._periodic_spacing = DEFAULT_INTERVAL cls._periodic_tasks.append((name, task)) cls._periodic_spacing[name] = task._periodic_spacing - cls._periodic_last_run[name] = task._periodic_last_run +@six.add_metaclass(_PeriodicTasksMeta) class PeriodicTasks(object): - __metaclass__ = _PeriodicTasksMeta + def __init__(self): + super(PeriodicTasks, self).__init__() + self._periodic_last_run = {} + for name, task in self._periodic_tasks: + self._periodic_last_run[name] = task._periodic_last_run def run_periodic_tasks(self, context, raise_on_error=False): """Tasks to be run at a periodic interval.""" @@ -159,30 +156,27 @@ class PeriodicTasks(object): for task_name, task in self._periodic_tasks: full_task_name = '.'.join([self.__class__.__name__, task_name]) - now = timeutils.utcnow() spacing = self._periodic_spacing[task_name] last_run = self._periodic_last_run[task_name] # If a periodic task is _nearly_ due, then we'll run it early - if spacing is not None and last_run is not None: - due = last_run + datetime.timedelta(seconds=spacing) - if not timeutils.is_soon(due, 0.2): - idle_for = min(idle_for, timeutils.delta_seconds(now, due)) + idle_for = min(idle_for, spacing) + if last_run is not None: + delta = last_run + spacing - time.time() + if delta > 0.2: + idle_for = min(idle_for, delta) continue - if spacing is not None: - idle_for = min(idle_for, spacing) - - LOG.debug(_("Running periodic task %(full_task_name)s"), + LOG.debug("Running periodic task %(full_task_name)s", {"full_task_name": full_task_name}) - self._periodic_last_run[task_name] = timeutils.utcnow() + self._periodic_last_run[task_name] = time.time() try: task(self, context) except Exception as e: if raise_on_error: raise - LOG.exception(_("Error during %(full_task_name)s: %(e)s"), + LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"), {"full_task_name": full_task_name, "e": e}) time.sleep(0) diff --git a/mistral/openstack/common/processutils.py b/mistral/openstack/common/processutils.py deleted file mode 100644 index 6035631b5..000000000 --- a/mistral/openstack/common/processutils.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -System-level utilities and helper functions. 
-""" - -import logging as stdlib_logging -import os -import random -import shlex -import signal - -from eventlet.green import subprocess -from eventlet import greenthread - -from mistral.openstack.common.gettextutils import _ # noqa -from mistral.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - - -class InvalidArgumentError(Exception): - def __init__(self, message=None): - super(InvalidArgumentError, self).__init__(message) - - -class UnknownArgumentError(Exception): - def __init__(self, message=None): - super(UnknownArgumentError, self).__init__(message) - - -class ProcessExecutionError(Exception): - def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, - description=None): - self.exit_code = exit_code - self.stderr = stderr - self.stdout = stdout - self.cmd = cmd - self.description = description - - if description is None: - description = "Unexpected error while running command." - if exit_code is None: - exit_code = '-' - message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" - % (description, cmd, exit_code, stdout, stderr)) - super(ProcessExecutionError, self).__init__(message) - - -class NoRootWrapSpecified(Exception): - def __init__(self, message=None): - super(NoRootWrapSpecified, self).__init__(message) - - -def _subprocess_setup(): - # Python installs a SIGPIPE handler by default. This is usually not what - # non-Python subprocesses expect. - signal.signal(signal.SIGPIPE, signal.SIG_DFL) - - -def execute(*cmd, **kwargs): - """Helper method to shell out and execute a command through subprocess. - - Allows optional retry. - - :param cmd: Passed to subprocess.Popen. - :type cmd: string - :param process_input: Send to opened process. - :type proces_input: string - :param check_exit_code: Single bool, int, or list of allowed exit - codes. Defaults to [0]. Raise - :class:`ProcessExecutionError` unless - program exits with one of these code. - :type check_exit_code: boolean, int, or [int] - :param delay_on_retry: True | False. Defaults to True. If set to True, - wait a short amount of time before retrying. - :type delay_on_retry: boolean - :param attempts: How many times to retry cmd. - :type attempts: int - :param run_as_root: True | False. Defaults to False. If set to True, - the command is prefixed by the command specified - in the root_helper kwarg. - :type run_as_root: boolean - :param root_helper: command to prefix to commands called with - run_as_root=True - :type root_helper: string - :param shell: whether or not there should be a shell used to - execute this command. Defaults to false. - :type shell: boolean - :param loglevel: log level for execute commands. - :type loglevel: int. 
(Should be stdlib_logging.DEBUG or - stdlib_logging.INFO) - :returns: (stdout, stderr) from process execution - :raises: :class:`UnknownArgumentError` on - receiving unknown arguments - :raises: :class:`ProcessExecutionError` - """ - - process_input = kwargs.pop('process_input', None) - check_exit_code = kwargs.pop('check_exit_code', [0]) - ignore_exit_code = False - delay_on_retry = kwargs.pop('delay_on_retry', True) - attempts = kwargs.pop('attempts', 1) - run_as_root = kwargs.pop('run_as_root', False) - root_helper = kwargs.pop('root_helper', '') - shell = kwargs.pop('shell', False) - loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG) - - if isinstance(check_exit_code, bool): - ignore_exit_code = not check_exit_code - check_exit_code = [0] - elif isinstance(check_exit_code, int): - check_exit_code = [check_exit_code] - - if kwargs: - raise UnknownArgumentError(_('Got unknown keyword args ' - 'to utils.execute: %r') % kwargs) - - if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0: - if not root_helper: - raise NoRootWrapSpecified( - message=('Command requested root, but did not specify a root ' - 'helper.')) - cmd = shlex.split(root_helper) + list(cmd) - - cmd = map(str, cmd) - - while attempts > 0: - attempts -= 1 - try: - LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd)) - _PIPE = subprocess.PIPE # pylint: disable=E1101 - - if os.name == 'nt': - preexec_fn = None - close_fds = False - else: - preexec_fn = _subprocess_setup - close_fds = True - - obj = subprocess.Popen(cmd, - stdin=_PIPE, - stdout=_PIPE, - stderr=_PIPE, - close_fds=close_fds, - preexec_fn=preexec_fn, - shell=shell) - result = None - if process_input is not None: - result = obj.communicate(process_input) - else: - result = obj.communicate() - obj.stdin.close() # pylint: disable=E1101 - _returncode = obj.returncode # pylint: disable=E1101 - LOG.log(loglevel, _('Result was %s') % _returncode) - if not ignore_exit_code and _returncode not in check_exit_code: - (stdout, stderr) = result - raise ProcessExecutionError(exit_code=_returncode, - stdout=stdout, - stderr=stderr, - cmd=' '.join(cmd)) - return result - except ProcessExecutionError: - if not attempts: - raise - else: - LOG.log(loglevel, _('%r failed. Retrying.'), cmd) - if delay_on_retry: - greenthread.sleep(random.randint(20, 200) / 100.0) - finally: - # NOTE(termie): this appears to be necessary to let the subprocess - # call clean something up in between calls, without - # it two execute calls in a row hangs the second one - greenthread.sleep(0) - - -def trycmd(*args, **kwargs): - """A wrapper around execute() to more easily handle warnings and errors. - - Returns an (out, err) tuple of strings containing the output of - the command's stdout and stderr. If 'err' is not empty then the - command can be considered to have failed. - - :discard_warnings True | False. Defaults to False. 
If set to True, - then for succeeding commands, stderr is cleared - - """ - discard_warnings = kwargs.pop('discard_warnings', False) - - try: - out, err = execute(*args, **kwargs) - failed = False - except ProcessExecutionError as exn: - out, err = '', str(exn) - failed = True - - if not failed and discard_warnings and err: - # Handle commands that output to stderr but otherwise succeed - err = '' - - return out, err - - -def ssh_execute(ssh, cmd, process_input=None, - addl_env=None, check_exit_code=True): - LOG.debug(_('Running cmd (SSH): %s'), cmd) - if addl_env: - raise InvalidArgumentError(_('Environment not supported over SSH')) - - if process_input: - # This is (probably) fixable if we need it... - raise InvalidArgumentError(_('process_input not supported over SSH')) - - stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) - channel = stdout_stream.channel - - # NOTE(justinsb): This seems suspicious... - # ...other SSH clients have buffering issues with this approach - stdout = stdout_stream.read() - stderr = stderr_stream.read() - stdin_stream.close() - - exit_status = channel.recv_exit_status() - - # exit_status == -1 if no exit code was returned - if exit_status != -1: - LOG.debug(_('Result was %s') % exit_status) - if check_exit_code and exit_status != 0: - raise ProcessExecutionError(exit_code=exit_status, - stdout=stdout, - stderr=stderr, - cmd=cmd) - - return (stdout, stderr) diff --git a/mistral/openstack/common/py3kcompat/urlutils.py b/mistral/openstack/common/py3kcompat/urlutils.py deleted file mode 100644 index 51e18111a..000000000 --- a/mistral/openstack/common/py3kcompat/urlutils.py +++ /dev/null @@ -1,63 +0,0 @@ -# -# Copyright 2013 Canonical Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -""" -Python2/Python3 compatibility layer for OpenStack -""" - -import six - -if six.PY3: - # python3 - import urllib.error - import urllib.parse - import urllib.request - - urlencode = urllib.parse.urlencode - urljoin = urllib.parse.urljoin - quote = urllib.parse.quote - parse_qsl = urllib.parse.parse_qsl - unquote = urllib.parse.unquote - urlparse = urllib.parse.urlparse - urlsplit = urllib.parse.urlsplit - urlunsplit = urllib.parse.urlunsplit - SplitResult = urllib.parse.SplitResult - - urlopen = urllib.request.urlopen - URLError = urllib.error.URLError - pathname2url = urllib.request.pathname2url -else: - # python2 - import urllib - import urllib2 - import urlparse - - urlencode = urllib.urlencode - quote = urllib.quote - unquote = urllib.unquote - - parse = urlparse - parse_qsl = parse.parse_qsl - urljoin = parse.urljoin - urlparse = parse.urlparse - urlsplit = parse.urlsplit - urlunsplit = parse.urlunsplit - SplitResult = parse.SplitResult - - urlopen = urllib2.urlopen - URLError = urllib2.URLError - pathname2url = urllib.pathname2url diff --git a/mistral/openstack/common/strutils.py b/mistral/openstack/common/strutils.py index c187dd5ae..98d0f3197 100644 --- a/mistral/openstack/common/strutils.py +++ b/mistral/openstack/common/strutils.py @@ -17,25 +17,31 @@ System-level utilities and helper functions. """ +import math import re import sys import unicodedata import six -from mistral.openstack.common.gettextutils import _ # noqa +from mistral.openstack.common.gettextutils import _ -# Used for looking up extensions of text -# to their 'multiplied' byte amount -BYTE_MULTIPLIERS = { - '': 1, - 't': 1024 ** 4, - 'g': 1024 ** 3, - 'm': 1024 ** 2, - 'k': 1024, +UNIT_PREFIX_EXPONENT = { + 'k': 1, + 'K': 1, + 'Ki': 1, + 'M': 2, + 'Mi': 2, + 'G': 3, + 'Gi': 3, + 'T': 4, + 'Ti': 4, +} +UNIT_SYSTEM_INFO = { + 'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')), + 'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')), } -BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)') TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') @@ -58,12 +64,12 @@ def int_from_bool_as_string(subject): return bool_from_string(subject) and 1 or 0 -def bool_from_string(subject, strict=False): +def bool_from_string(subject, strict=False, default=False): """Interpret a string as a boolean. A case-insensitive match is performed such that strings matching 't', 'true', 'on', 'y', 'yes', or '1' are considered True and, when - `strict=False`, anything else is considered False. + `strict=False`, anything else returns the value specified by 'default'. Useful for JSON-decoded stuff and config file parsing. @@ -72,7 +78,7 @@ def bool_from_string(subject, strict=False): Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. """ if not isinstance(subject, six.string_types): - subject = str(subject) + subject = six.text_type(subject) lowered = subject.strip().lower() @@ -88,11 +94,12 @@ def bool_from_string(subject, strict=False): 'acceptable': acceptable} raise ValueError(msg) else: - return False + return default def safe_decode(text, incoming=None, errors='strict'): - """Decodes incoming str using `incoming` if they're not already unicode. + """Decodes incoming text/bytes string using `incoming` if they're not + already unicode. :param incoming: Text's current encoding :param errors: Errors handling policy. See here for valid @@ -101,7 +108,7 @@ def safe_decode(text, incoming=None, errors='strict'): representation of it. 
:raises TypeError: If text is not an instance of str """ - if not isinstance(text, six.string_types): + if not isinstance(text, (six.string_types, six.binary_type)): raise TypeError("%s can't be decoded" % type(text)) if isinstance(text, six.text_type): @@ -131,7 +138,7 @@ def safe_decode(text, incoming=None, errors='strict'): def safe_encode(text, incoming=None, encoding='utf-8', errors='strict'): - """Encodes incoming str/unicode using `encoding`. + """Encodes incoming text/bytes string using `encoding`. If incoming is not specified, text is expected to be encoded with current python's default encoding. (`sys.getdefaultencoding`) @@ -144,7 +151,7 @@ def safe_encode(text, incoming=None, representation of it. :raises TypeError: If text is not an instance of str """ - if not isinstance(text, six.string_types): + if not isinstance(text, (six.string_types, six.binary_type)): raise TypeError("%s can't be encoded" % type(text)) if not incoming: @@ -157,38 +164,54 @@ # Decode text before encoding it with `encoding` text = safe_decode(text, incoming, errors) return text.encode(encoding, errors) - - return text + else: + return text -def to_bytes(text, default=0): - """Converts a string into an integer of bytes. +def string_to_bytes(text, unit_system='IEC', return_int=False): + """Converts a string into a float representation of bytes. - Looks at the last characters of the text to determine - what conversion is needed to turn the input text into a byte number. - Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive) + The units supported for IEC :: + + Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it) + KB, KiB, MB, MiB, GB, GiB, TB, TiB + + The units supported for SI :: + + kb(it), Mb(it), Gb(it), Tb(it) + kB, MB, GB, TB + + Note that the SI unit system does not support capital letter 'K' + + :param text: String input for bytes size conversion. - :param default: Default return value when text is blank. + :param unit_system: Unit system for byte size conversion. + :param return_int: If True, returns integer representation of text + in bytes. (default: decimal) + :returns: Numerical representation of text in bytes. + :raises ValueError: If text has an invalid value. 
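(Review note: a few conversions implied by the new string_to_bytes() docstring and regexes, worked out from base ** UNIT_PREFIX_EXPONENT and the divide-by-8 rule for bit suffixes:)

```python
# Worked examples against the function added in this patch.
from mistral.openstack.common import strutils

print(strutils.string_to_bytes('1KB'))                      # IEC: 1 * 1024 = 1024.0
print(strutils.string_to_bytes('1kB', unit_system='SI'))    # SI: 1 * 1000 = 1000.0
print(strutils.string_to_bytes('8Kb'))                      # 8 bits = 1 byte, so 1024.0
print(strutils.string_to_bytes('1.5MiB', return_int=True))  # ceil(1.5 * 1024**2) = 1572864
```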
""" - match = BYTE_REGEX.search(text) + try: + base, reg_ex = UNIT_SYSTEM_INFO[unit_system] + except KeyError: + msg = _('Invalid unit system: "%s"') % unit_system + raise ValueError(msg) + match = reg_ex.match(text) if match: - magnitude = int(match.group(1)) - mult_key_org = match.group(2) - if not mult_key_org: - return magnitude - elif text: - msg = _('Invalid string format: %s') % text - raise TypeError(msg) + magnitude = float(match.group(1)) + unit_prefix = match.group(2) + if match.group(3) in ['b', 'bit']: + magnitude /= 8 else: - return default - mult_key = mult_key_org.lower().replace('b', '', 1) - multiplier = BYTE_MULTIPLIERS.get(mult_key) - if multiplier is None: - msg = _('Unknown byte multiplier: %s') % mult_key_org - raise TypeError(msg) - return magnitude * multiplier + msg = _('Invalid string format: %s') % text + raise ValueError(msg) + if not unit_prefix: + res = magnitude + else: + res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix]) + if return_int: + return int(math.ceil(res)) + return res def to_slug(value, incoming=None, errors="strict"): diff --git a/mistral/openstack/common/test.py b/mistral/openstack/common/test.py deleted file mode 100644 index 6b6a5f913..000000000 --- a/mistral/openstack/common/test.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Common utilities used in testing""" - -import os - -import fixtures -import testtools - -_TRUE_VALUES = ('True', 'true', '1', 'yes') - - -class BaseTestCase(testtools.TestCase): - - def setUp(self): - super(BaseTestCase, self).setUp() - self._set_timeout() - self._fake_output() - self.useFixture(fixtures.FakeLogger('mistral.openstack.common')) - self.useFixture(fixtures.NestedTempfile()) - self.useFixture(fixtures.TempHomeDir()) - - def _set_timeout(self): - test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) - try: - test_timeout = int(test_timeout) - except ValueError: - # If timeout value is invalid do not set a timeout. - test_timeout = 0 - if test_timeout > 0: - self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) - - def _fake_output(self): - if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES: - stdout = self.useFixture(fixtures.StringStream('stdout')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) - if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES: - stderr = self.useFixture(fixtures.StringStream('stderr')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) diff --git a/mistral/openstack/common/threadgroup.py b/mistral/openstack/common/threadgroup.py index 8a2211e55..0480ac731 100644 --- a/mistral/openstack/common/threadgroup.py +++ b/mistral/openstack/common/threadgroup.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2012 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,10 +11,10 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import threading import eventlet from eventlet import greenpool -from eventlet import greenthread from mistral.openstack.common import log as logging from mistral.openstack.common import loopingcall @@ -48,9 +46,12 @@ class Thread(object): def wait(self): return self.thread.wait() + def link(self, func, *args, **kwargs): + self.thread.link(func, *args, **kwargs) + class ThreadGroup(object): - """The point of the ThreadGroup classis to: + """The point of the ThreadGroup class is to: * keep track of timers and greenthreads (making it easier to stop them when need be). @@ -79,13 +80,17 @@ class ThreadGroup(object): gt = self.pool.spawn(callback, *args, **kwargs) th = Thread(gt, self) self.threads.append(th) + return th def thread_done(self, thread): self.threads.remove(thread) - def stop(self): - current = greenthread.getcurrent() - for x in self.threads: + def _stop_threads(self): + current = threading.current_thread() + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: if x is current: # don't kill the current thread. continue @@ -94,6 +99,7 @@ except Exception as ex: LOG.exception(ex) + def stop_timers(self): for x in self.timers: try: x.stop() @@ -101,6 +107,23 @@ LOG.exception(ex) self.timers = [] + def stop(self, graceful=False): + """Stop the group's timers and threads, with an optional graceful mode. + + * In case of graceful=True, wait for all threads to be finished. + Never kill threads. + * In case of graceful=False, kill threads immediately. 
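(Review note: the graceful flag in plain-threading terms. This is only a sketch; ThreadGroup itself rides on eventlet greenthreads, which, unlike stdlib threads, can be killed from outside.)

```python
import threading
import time


class TinyGroup(object):
    def __init__(self):
        self.threads = []

    def add_thread(self, fn, *args):
        t = threading.Thread(target=fn, args=args)
        t.start()
        self.threads.append(t)
        return t

    def stop(self, graceful=False):
        if graceful:
            # graceful=True: wait for all threads, never kill them.
            for t in self.threads[:]:
                t.join()
        else:
            # Plain threads cannot be killed from outside; eventlet can
            # (greenthread.kill), which is what _stop_threads relies on.
            raise NotImplementedError('non-graceful stop needs eventlet')


group = TinyGroup()
group.add_thread(time.sleep, 0.1)
group.stop(graceful=True)
print('all workers finished')
```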
+ """ + self.stop_timers() + if graceful: + # In case of graceful=True, wait for all threads to be + # finished, never kill threads + self.wait() + else: + # In case of graceful=False(Default), kill threads + # immediately + self._stop_threads() + def wait(self): for x in self.timers: try: @@ -109,8 +132,11 @@ class ThreadGroup(object): pass except Exception as ex: LOG.exception(ex) - current = greenthread.getcurrent() - for x in self.threads: + current = threading.current_thread() + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: if x is current: continue try: diff --git a/mistral/openstack/common/timeutils.py b/mistral/openstack/common/timeutils.py index c8b0b1539..52688a026 100644 --- a/mistral/openstack/common/timeutils.py +++ b/mistral/openstack/common/timeutils.py @@ -77,6 +77,9 @@ def is_older_than(before, seconds): """Return True if before is older than seconds.""" if isinstance(before, six.string_types): before = parse_strtime(before).replace(tzinfo=None) + else: + before = before.replace(tzinfo=None) + return utcnow() - before > datetime.timedelta(seconds=seconds) @@ -84,6 +87,9 @@ def is_newer_than(after, seconds): """Return True if after is newer than seconds.""" if isinstance(after, six.string_types): after = parse_strtime(after).replace(tzinfo=None) + else: + after = after.replace(tzinfo=None) + return after - utcnow() > datetime.timedelta(seconds=seconds) @@ -108,7 +114,7 @@ def utcnow(): def iso8601_from_timestamp(timestamp): - """Returns a iso8601 formated date from timestamp.""" + """Returns a iso8601 formatted date from timestamp.""" return isotime(datetime.datetime.utcfromtimestamp(timestamp)) @@ -195,8 +201,8 @@ def total_seconds(delta): def is_soon(dt, window): """Determines if time is going to happen in the next window seconds. - :params dt: the time - :params window: minimum seconds to remain to consider the time not soon + :param dt: the time + :param window: minimum seconds to remain to consider the time not soon :return: True if expiration is within the given duration """ diff --git a/mistral/openstack/common/py3kcompat/__init__.py b/mistral/openstack/common/uuidutils.py similarity index 56% rename from mistral/openstack/common/py3kcompat/__init__.py rename to mistral/openstack/common/uuidutils.py index 97ae4e34a..234b880c9 100644 --- a/mistral/openstack/common/py3kcompat/__init__.py +++ b/mistral/openstack/common/uuidutils.py @@ -1,5 +1,4 @@ -# -# Copyright 2013 Canonical Ltd. +# Copyright (c) 2012 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,4 +12,26 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# + +""" +UUID related utilities and helper functions. +""" + +import uuid + + +def generate_uuid(): + return str(uuid.uuid4()) + + +def is_uuid_like(val): + """Returns validation of a value as a UUID. 
+ + For our purposes, a UUID is a canonical form string: + aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa + + """ + try: + return str(uuid.UUID(val)) == val + except (TypeError, ValueError, AttributeError): + return False diff --git a/mistral/tests/base.py b/mistral/tests/base.py index d9f775e90..bbea7a0f2 100644 --- a/mistral/tests/base.py +++ b/mistral/tests/base.py @@ -32,7 +32,6 @@ importutils.import_module("mistral.config") from mistral.db.sqlalchemy import api as db_api from mistral.openstack.common import log as logging -from mistral.openstack.common.db.sqlalchemy import session from mistral import version from mistral import engine from mistral.engine import executor @@ -106,7 +105,8 @@ class BaseTest(unittest2.TestCase): class DbTestCase(BaseTest): def setUp(self): self.db_fd, self.db_path = tempfile.mkstemp() - session.set_defaults('sqlite:///' + self.db_path, self.db_path) + cfg.CONF.set_default('connection', 'sqlite:///' + self.db_path, + group='database') db_api.setup_db() def tearDown(self): diff --git a/openstack-common.conf b/openstack-common.conf index 321680c6e..c3ec61374 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -1,9 +1,7 @@ [DEFAULT] # The list of modules to copy from oslo-incubator.git -module=wsgi module=config -module=exception module=cliutils module=db module=db.sqlalchemy diff --git a/requirements.txt b/requirements.txt index 4e56e940d..be476dff4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,6 +10,7 @@ requests>=1.1 kombu>=2.4.8 oslo.config>=1.2.0 oslo.messaging>=1.3.0 +oslotest paramiko>=1.9.0 python-keystoneclient>=0.7.0 networkx>=1.8 diff --git a/tools/config/check_uptodate.sh b/tools/config/check_uptodate.sh new file mode 100755 index 000000000..d34caae2c --- /dev/null +++ b/tools/config/check_uptodate.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +PROJECT_NAME=${PROJECT_NAME:-mistral} +CFGFILE_NAME=${PROJECT_NAME}.conf.sample + +if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then + CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME} +elif [ -e etc/${CFGFILE_NAME} ]; then + CFGFILE=etc/${CFGFILE_NAME} +else + echo "${0##*/}: can not find config file" + exit 1 +fi + +TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX` +trap "rm -rf $TEMPDIR" EXIT + +tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR} + +if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE} +then + echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date." + echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh." + exit 1 +fi diff --git a/tools/config/generate_sample.sh b/tools/config/generate_sample.sh index 29fb346b1..7d2389d38 100755 --- a/tools/config/generate_sample.sh +++ b/tools/config/generate_sample.sh @@ -1,11 +1,21 @@ #!/usr/bin/env bash +# Generate sample configuration for your project. +# +# Aside from the command line flags, it also respects a config file which +# should be named oslo.config.generator.rc and be placed in the same directory. +# +# You can then export the following variables: +# MISTRAL_CONFIG_GENERATOR_EXTRA_MODULES: list of modules to interrogate for options. +# MISTRAL_CONFIG_GENERATOR_EXTRA_LIBRARIES: list of libraries to discover. +# MISTRAL_CONFIG_GENERATOR_EXCLUDED_FILES: list of files to remove from automatic listing. + print_hint() { echo "Try \`${0##*/} --help' for more information." 
>&2 } -PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:o: \ - --long help,base-dir:,package-name:,output-dir: -- "$@") +PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:m:l:o: \ + --long help,base-dir:,package-name:,output-dir:,module:,library: -- "$@") if [ $? != 0 ] ; then print_hint ; exit 1 ; fi @@ -21,6 +31,8 @@ while true; do echo "-b, --base-dir=DIR project base directory" echo "-p, --package-name=NAME project package name" echo "-o, --output-dir=DIR file output directory" + echo "-m, --module=MOD extra python module to interrogate for options" + echo "-l, --library=LIB extra library that registers options for discovery" exit 0 ;; -b|--base-dir) @@ -38,6 +50,16 @@ while true; do OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'` shift ;; + -m|--module) + shift + MODULES="$MODULES -m $1" + shift + ;; + -l|--library) + shift + LIBRARIES="$LIBRARIES -l $1" + shift + ;; --) break ;; @@ -53,7 +75,7 @@ then BASEDIR=$(cd "$BASEDIR" && pwd) fi -PACKAGENAME=${PACKAGENAME:-${BASEDIR##*/}} +PACKAGENAME=${PACKAGENAME:-$(python setup.py --name)} TARGETDIR=$BASEDIR/$PACKAGENAME if ! [ -d $TARGETDIR ] then @@ -77,12 +99,24 @@ find $TARGETDIR -type f -name "*.pyc" -delete FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \ -exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u) -EXTRA_MODULES_FILE="`dirname $0`/oslo.config.generator.rc" -if test -r "$EXTRA_MODULES_FILE" +RC_FILE="`dirname $0`/oslo.config.generator.rc" +if test -r "$RC_FILE" then - source "$EXTRA_MODULES_FILE" + source "$RC_FILE" fi +for filename in ${MISTRAL_CONFIG_GENERATOR_EXCLUDED_FILES}; do + FILES="${FILES[@]/$filename/}" +done + +for mod in ${MISTRAL_CONFIG_GENERATOR_EXTRA_MODULES}; do + MODULES="$MODULES -m $mod" +done + +for lib in ${MISTRAL_CONFIG_GENERATOR_EXTRA_LIBRARIES}; do + LIBRARIES="$LIBRARIES -l $lib" +done + export EVENTLET_NO_GREENDNS=yes OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs) @@ -90,7 +124,7 @@ OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs) DEFAULT_MODULEPATH=mistral.openstack.common.config.generator MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH} OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample -python -m $MODULEPATH $FILES > $OUTPUTFILE +python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE # Hook to allow projects to append custom config file snippets CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null) diff --git a/tools/install_venv b/tools/install_venv deleted file mode 100755 index 6a4aea2a5..000000000 --- a/tools/install_venv +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -tox -vvv -evenv -- python --version diff --git a/tools/install_venv.py b/tools/install_venv.py deleted file mode 100644 index 0011a8be1..000000000 --- a/tools/install_venv.py +++ /dev/null @@ -1,77 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Copyright 2010 OpenStack Foundation -# Copyright 2013 IBM Corp. -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import ConfigParser -import os -import sys - -import install_venv_common as install_venv # flake8: noqa - - -def print_help(project, venv, root): - help = """ - %(project)s development environment setup is complete. - - %(project)s development uses virtualenv to track and manage Python - dependencies while in development and testing. - - To activate the %(project)s virtualenv for the extent of your current - shell session you can run: - - $ source %(venv)s/bin/activate - - Or, if you prefer, you can run commands in the virtualenv on a case by - case basis by running: - - $ %(root)s/tools/with_venv.sh - """ - print help % dict(project=project, venv=venv, root=root) - - -def main(argv): - root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - - if os.environ.get('tools_path'): - root = os.environ['tools_path'] - venv = os.path.join(root, '.venv') - if os.environ.get('venv'): - venv = os.environ['venv'] - - pip_requires = os.path.join(root, 'requirements.txt') - test_requires = os.path.join(root, 'test-requirements.txt') - py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) - setup_cfg = ConfigParser.ConfigParser() - setup_cfg.read('setup.cfg') - project = setup_cfg.get('metadata', 'name') - - install = install_venv.InstallVenv( - root, venv, pip_requires, test_requires, py_version, project) - options = install.parse_args(argv) - install.check_python_version() - install.check_dependencies() - install.create_virtualenv(no_site_packages=options.no_site_packages) - install.install_dependencies() - install.post_process() - print_help(project, venv, root) - -if __name__ == '__main__': - main(sys.argv) diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py deleted file mode 100644 index f428c1e02..000000000 --- a/tools/install_venv_common.py +++ /dev/null @@ -1,212 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 OpenStack Foundation -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides methods needed by installation script for OpenStack development -virtual environments. - -Since this script is used to bootstrap a virtualenv from the system's Python -environment, it should be kept strictly compatible with Python 2.6. 
- -Synced in from openstack-common -""" - -from __future__ import print_function - -import optparse -import os -import subprocess -import sys - - -class InstallVenv(object): - - def __init__(self, root, venv, requirements, - test_requirements, py_version, - project): - self.root = root - self.venv = venv - self.requirements = requirements - self.test_requirements = test_requirements - self.py_version = py_version - self.project = project - - def die(self, message, *args): - print(message % args, file=sys.stderr) - sys.exit(1) - - def check_python_version(self): - if sys.version_info < (2, 6): - self.die("Need Python Version >= 2.6") - - def run_command_with_code(self, cmd, redirect_output=True, - check_exit_code=True): - """Runs a command in an out-of-process shell. - - Returns the output of that command. Working directory is self.root. - """ - if redirect_output: - stdout = subprocess.PIPE - else: - stdout = None - - proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) - output = proc.communicate()[0] - if check_exit_code and proc.returncode != 0: - self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) - return (output, proc.returncode) - - def run_command(self, cmd, redirect_output=True, check_exit_code=True): - return self.run_command_with_code(cmd, redirect_output, - check_exit_code)[0] - - def get_distro(self): - if (os.path.exists('/etc/fedora-release') or - os.path.exists('/etc/redhat-release')): - return Fedora( - self.root, self.venv, self.requirements, - self.test_requirements, self.py_version, self.project) - else: - return Distro( - self.root, self.venv, self.requirements, - self.test_requirements, self.py_version, self.project) - - def check_dependencies(self): - self.get_distro().install_virtualenv() - - def create_virtualenv(self, no_site_packages=True): - """Creates the virtual environment and installs PIP. - - Creates the virtual environment and installs PIP only into the - virtual environment. - """ - if not os.path.isdir(self.venv): - print('Creating venv...', end=' ') - if no_site_packages: - self.run_command(['virtualenv', '-q', '--no-site-packages', - self.venv]) - else: - self.run_command(['virtualenv', '-q', self.venv]) - print('done.') - else: - print("venv already exists...") - pass - - def pip_install(self, *args): - self.run_command(['tools/with_venv.sh', - 'pip', 'install', '--upgrade'] + list(args), - redirect_output=False) - - def install_dependencies(self): - print('Installing dependencies with pip (this can take a while)...') - - # First things first, make sure our venv has the latest pip and - # setuptools. 
- self.pip_install('pip>=1.3') - self.pip_install('setuptools') - - self.pip_install('-r', self.requirements) - self.pip_install('-r', self.test_requirements) - - def post_process(self): - self.get_distro().post_process() - - def parse_args(self, argv): - """Parses command-line arguments.""" - parser = optparse.OptionParser() - parser.add_option('-n', '--no-site-packages', - action='store_true', - help="Do not inherit packages from global Python " - "install") - return parser.parse_args(argv[1:])[0] - - -class Distro(InstallVenv): - - def check_cmd(self, cmd): - return bool(self.run_command(['which', cmd], - check_exit_code=False).strip()) - - def install_virtualenv(self): - if self.check_cmd('virtualenv'): - return - - if self.check_cmd('easy_install'): - print('Installing virtualenv via easy_install...', end=' ') - if self.run_command(['easy_install', 'virtualenv']): - print('Succeeded') - return - else: - print('Failed') - - self.die('ERROR: virtualenv not found.\n\n%s development' - ' requires virtualenv, please install it using your' - ' favorite package management tool' % self.project) - - def post_process(self): - """Any distribution-specific post-processing gets done here. - - In particular, this is useful for applying patches to code inside - the venv. - """ - pass - - -class Fedora(Distro): - """This covers all Fedora-based distributions. - - Includes: Fedora, RHEL, CentOS, Scientific Linux - """ - - def check_pkg(self, pkg): - return self.run_command_with_code(['rpm', '-q', pkg], - check_exit_code=False)[1] == 0 - - def apply_patch(self, originalfile, patchfile): - self.run_command(['patch', '-N', originalfile, patchfile], - check_exit_code=False) - - def install_virtualenv(self): - if self.check_cmd('virtualenv'): - return - - if not self.check_pkg('python-virtualenv'): - self.die("Please install 'python-virtualenv'.") - - super(Fedora, self).install_virtualenv() - - def post_process(self): - """Workaround for a bug in eventlet. - - This currently affects RHEL6.1, but the fix can safely be - applied to all RHEL and Fedora distributions. - - This can be removed when the fix is applied upstream. - - Nova: https://bugs.launchpad.net/nova/+bug/884915 - Upstream: https://bitbucket.org/eventlet/eventlet/issue/89 - RHEL: https://bugzilla.redhat.com/958868 - """ - - # Install "patch" program if it's not there - if not self.check_pkg('patch'): - self.die("Please install 'patch'.") - - # Apply the eventlet patch - self.apply_patch(os.path.join(self.venv, 'lib', self.py_version, - 'site-packages', - 'eventlet/green/subprocess.py'), - 'contrib/redhat-eventlet.patch') diff --git a/tools/lintstack.py b/tools/lintstack.py deleted file mode 100755 index dbf9e8d3c..000000000 --- a/tools/lintstack.py +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/env python -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012, AT&T Labs, Yun Mao -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -# Stolen from OpenStack Nova - -"""pylint error checking.""" - -import cStringIO as StringIO -import json -import re -import sys - -from pylint import lint -from pylint.reporters import text - -# Note(maoy): E1103 is error code related to partial type inference -ignore_codes = ["E1103"] -# Note(maoy): the error message is the pattern of E0202. It should be ignored -# for mistral.tests modules -ignore_messages = ["An attribute affected in mistral.tests"] -# We ignore all errors in openstack.common because it should be checked -# elsewhere. -ignore_modules = ["mistral/openstack/common/"] - -KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions" - - -class LintOutput(object): - - _cached_filename = None - _cached_content = None - - def __init__(self, filename, lineno, line_content, code, message, - lintoutput): - self.filename = filename - self.lineno = lineno - self.line_content = line_content - self.code = code - self.message = message - self.lintoutput = lintoutput - - @classmethod - def from_line(cls, line): - m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)", line) - matched = m.groups() - filename, lineno, code, message = (matched[0], int(matched[1]), - matched[2], matched[-1]) - if cls._cached_filename != filename: - with open(filename) as f: - cls._cached_content = list(f.readlines()) - cls._cached_filename = filename - line_content = cls._cached_content[lineno - 1].rstrip() - return cls(filename, lineno, line_content, code, message, - line.rstrip()) - - @classmethod - def from_msg_to_dict(cls, msg): - """From the output of pylint msg, to a dict, where each key - is a unique error identifier, value is a list of LintOutput - """ - result = {} - for line in msg.splitlines(): - obj = cls.from_line(line) - if obj.is_ignored(): - continue - key = obj.key() - if key not in result: - result[key] = [] - result[key].append(obj) - return result - - def is_ignored(self): - if self.code in ignore_codes: - return True - if any(self.filename.startswith(name) for name in ignore_modules): - return True - if any(msg in self.message for msg in ignore_messages): - return True - return False - - def key(self): - if self.code in ["E1101", "E1103"]: - # These two types of errors are like Foo class has no member bar. - # We discard the source code so that the error will be ignored - # next time another Foo.bar is encountered. 
- return self.message, "" - return self.message, self.line_content.strip() - - def json(self): - return json.dumps(self.__dict__) - - def review_str(self): - return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n" - "%(code)s: %(message)s" % self.__dict__) - - -class ErrorKeys(object): - - @classmethod - def print_json(cls, errors, output=sys.stdout): - print >>output, "# automatically generated by tools/lintstack.py" - for i in sorted(errors.keys()): - print >>output, json.dumps(i) - - @classmethod - def from_file(cls, filename): - keys = set() - for line in open(filename): - if line and line[0] != "#": - d = json.loads(line) - keys.add(tuple(d)) - return keys - - -def run_pylint(): - buff = StringIO.StringIO() - reporter = text.ParseableTextReporter(output=buff) - args = ["--include-ids=y", "-E", "mistral"] - lint.Run(args, reporter=reporter, exit=False) - val = buff.getvalue() - buff.close() - return val - - -def generate_error_keys(msg=None): - print "Generating", KNOWN_PYLINT_EXCEPTIONS_FILE - if msg is None: - msg = run_pylint() - errors = LintOutput.from_msg_to_dict(msg) - with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f: - ErrorKeys.print_json(errors, output=f) - - -def validate(newmsg=None): - print "Loading", KNOWN_PYLINT_EXCEPTIONS_FILE - known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE) - if newmsg is None: - print "Running pylint. Be patient..." - newmsg = run_pylint() - errors = LintOutput.from_msg_to_dict(newmsg) - - print "Unique errors reported by pylint: was %d, now %d." \ - % (len(known), len(errors)) - passed = True - for err_key, err_list in errors.items(): - for err in err_list: - if err_key not in known: - print err.lintoutput - print - passed = False - if passed: - print "Congrats! pylint check passed." - redundant = known - set(errors.keys()) - if redundant: - print "Extra credit: some known pylint exceptions disappeared." - for i in sorted(redundant): - print json.dumps(i) - print "Consider regenerating the exception file if you will." - else: - print ("Please fix the errors above. If you believe they are false" - " positives, run 'tools/lintstack.py generate' to overwrite.") - sys.exit(1) - - -def usage(): - print """Usage: tools/lintstack.py [generate|validate] - To generate pylint_exceptions file: tools/lintstack.py generate - To validate the current commit: tools/lintstack.py - """ - - -def main(): - option = "validate" - if len(sys.argv) > 1: - option = sys.argv[1] - if option == "generate": - generate_error_keys() - elif option == "validate": - validate() - else: - usage() - - -if __name__ == "__main__": - main() diff --git a/tools/lintstack.sh b/tools/lintstack.sh deleted file mode 100755 index f2464b060..000000000 --- a/tools/lintstack.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2012-2013, AT&T Labs, Yun Mao -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Stolen from OpenStack Nova - -# Use lintstack.py to compare pylint errors. 
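Stepping back to the lintstack.py checker just removed: its core is LintOutput.from_line, which parses pylint's "parseable" output with a single regex and then keys E1101/E1103 errors on the message alone, so a recorded exception survives the offending line moving around. A self-contained sketch of that parsing step, using a made-up pylint line (the real class also reads the source file to capture line content, which is skipped here):

    import re

    # Same pattern as the removed LintOutput.from_line.
    PATTERN = r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)"

    # Hypothetical pylint output line, invented for illustration.
    sample = ("mistral/engine/engine.py:42: [E1101, Engine.run] "
              "Instance of 'Engine' has no 'execute' member")

    m = re.search(PATTERN, sample)
    filename, lineno, code, message = (m.group(1), int(m.group(2)),
                                       m.group(3), m.group(5))

    # E1101/E1103 are "no member" errors; keying them on the message alone
    # (see key() in the removed class) means the stored exception still
    # matches after unrelated edits shift line numbers.
    assert code in ("E1101", "E1103")
    key = (message, "")

    print(key)  # ("Instance of 'Engine' has no 'execute' member", "")

The validate() path above then reduces to set arithmetic: any key produced by the new pylint run that is absent from tools/pylint_exceptions fails the check, and keys that disappeared are reported as candidates for regeneration.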
-# We run pylint twice, once on HEAD, once on the code before the latest -# commit for review. -set -e -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -# Get the current branch name. -GITHEAD=`git rev-parse --abbrev-ref HEAD` -if [[ "$GITHEAD" == "HEAD" ]]; then - # In detached head mode, get revision number instead - GITHEAD=`git rev-parse HEAD` - echo "Currently we are at commit $GITHEAD" -else - echo "Currently we are at branch $GITHEAD" -fi - -cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py - -if git rev-parse HEAD^2 2>/dev/null; then - # The HEAD is a Merge commit. Here, the patch to review is - # HEAD^2, the master branch is at HEAD^1, and the patch was - # written based on HEAD^2~1. - PREV_COMMIT=`git rev-parse HEAD^2~1` - git checkout HEAD~1 - # The git merge is necessary for reviews with a series of patches. - # If not, this is a no-op so won't hurt either. - git merge $PREV_COMMIT -else - # The HEAD is not a merge commit. This won't happen on gerrit. - # Most likely you are running against your own patch locally. - # We assume the patch to examine is HEAD, and we compare it against - # HEAD~1 - git checkout HEAD~1 -fi - -# First generate tools/pylint_exceptions from HEAD~1 -$TOOLS_DIR/lintstack.head.py generate -# Then use that as a reference to compare against HEAD -git checkout $GITHEAD -$TOOLS_DIR/lintstack.head.py -echo "Check passed. FYI: the pylint exceptions are:" -cat $TOOLS_DIR/pylint_exceptions - diff --git a/tools/run_pep8 b/tools/run_pep8 deleted file mode 100755 index 256ec8350..000000000 --- a/tools/run_pep8 +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -tox -epep8 diff --git a/tools/run_pylint b/tools/run_pylint deleted file mode 100755 index c69b8d1f7..000000000 --- a/tools/run_pylint +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -tox -epylint diff --git a/tools/update_env_deps b/tools/update_env_deps deleted file mode 100755 index 5551dafe7..000000000 --- a/tools/update_env_deps +++ /dev/null @@ -1,16 +0,0 @@ -TOX_ENVLIST=`grep envlist tox.ini | cut -d '=' -f 2 | tr ',' ' '` -TESTENVS=`grep testenv tox.ini | awk -F ':' '{print $2}' | tr '[]' ' '` -UNFILTERED_ENVLIST=`echo "$TOX_ENVLIST $TESTENVS"` -ENVLIST=$( awk 'BEGIN{RS=ORS=" "}!a[$0]++' <<<$UNFILTERED_ENVLIST ); -for env in $ENVLIST -do - ENV_PATH=.tox/$env - PIP_PATH=$ENV_PATH/bin/pip - echo -e "\nUpdate environment ${env}...\n" - if [ ! -d $ENV_PATH -o ! -f $PIP_PATH ] - then - tox --notest -e$env - else - $PIP_PATH install -r requirements.txt -r test-requirements.txt - fi -done diff --git a/tools/with_venv.sh b/tools/with_venv.sh deleted file mode 100755 index c8d2940fc..000000000 --- a/tools/with_venv.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -TOOLS=`dirname $0` -VENV=$TOOLS/../.venv -source $VENV/bin/activate && $@
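The remaining deletions are thin wrappers: run_pep8 and run_pylint simply invoked `tox -epep8` and `tox -epylint`, with_venv.sh activated the project's .venv, and update_env_deps refreshed each tox environment's requirements, so calling tox directly covers all of them. For the lintstack.sh comparison flow above, a rough Python equivalent is sketched below; it keeps only the simple non-merge path (the HEAD^2 merge handling and gerrit-specific details are omitted) and assumes the repository layout the script itself assumed:

    import shutil
    import subprocess


    def sh(*cmd):
        return subprocess.check_output(cmd).decode().strip()


    # Record where we are so we can come back (commit id if detached).
    head = sh('git', 'rev-parse', '--abbrev-ref', 'HEAD')
    if head == 'HEAD':
        head = sh('git', 'rev-parse', 'HEAD')

    # Keep the current checker around while we rewind the working tree,
    # mirroring the script's cp of lintstack.py to lintstack.head.py.
    shutil.copy('tools/lintstack.py', 'tools/lintstack.head.py')

    # Pass 1: generate tools/pylint_exceptions from the previous commit.
    subprocess.check_call(['git', 'checkout', 'HEAD~1'])
    subprocess.check_call(['python', 'tools/lintstack.head.py', 'generate'])

    # Pass 2: return to the commit under review and validate against it.
    subprocess.check_call(['git', 'checkout', head])
    subprocess.check_call(['python', 'tools/lintstack.head.py'])

The two-pass design is the whole trick: by generating the exception file from HEAD~1 and validating HEAD against it, only pylint errors introduced by the commit under review can fail the check.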