diff --git a/openstack-common.conf b/openstack-common.conf index de92db4a..e9ad3e7d 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -3,6 +3,7 @@ # The list of modules to copy from oslo-incubator.git module=db module=db.sqlalchemy +module=fixture module=processutils module=log diff --git a/storyboard/openstack/common/db/__init__.py b/storyboard/openstack/common/db/__init__.py index 1b9b60de..e69de29b 100644 --- a/storyboard/openstack/common/db/__init__.py +++ b/storyboard/openstack/common/db/__init__.py @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Cloudscaling Group, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/storyboard/openstack/common/db/api.py b/storyboard/openstack/common/db/api.py index ebe852d6..56aec57d 100644 --- a/storyboard/openstack/common/db/api.py +++ b/storyboard/openstack/common/db/api.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2013 Rackspace Hosting # All Rights Reserved. # @@ -21,27 +19,15 @@ Supported configuration options: The following two parameters are in the 'database' group: `backend`: DB backend name or full module path to DB backend module. -`use_tpool`: Enable thread pooling of DB API calls. A DB backend module should implement a method named 'get_backend' which takes no arguments. The method can return any object that implements DB API methods. - -*NOTE*: There are bugs in eventlet when using tpool combined with -threading locks. The python logging module happens to use such locks. To -work around this issue, be sure to specify thread=False with -eventlet.monkey_patch(). - -A bug for eventlet has been filed here: - -https://bitbucket.org/eventlet/eventlet/issue/137/ """ -import functools from oslo.config import cfg from storyboard.openstack.common import importutils -from storyboard.openstack.common import lockutils db_opts = [ @@ -50,12 +36,6 @@ db_opts = [ deprecated_name='db_backend', deprecated_group='DEFAULT', help='The backend to use for db'), - cfg.BoolOpt('use_tpool', - default=False, - deprecated_name='dbapi_use_tpool', - deprecated_group='DEFAULT', - help='Enable the experimental use of thread pooling for ' - 'all DB API calls') ] CONF = cfg.CONF @@ -66,41 +46,12 @@ class DBAPI(object): def __init__(self, backend_mapping=None): if backend_mapping is None: backend_mapping = {} - self.__backend = None - self.__backend_mapping = backend_mapping - - @lockutils.synchronized('dbapi_backend', 'storyboard-') - def __get_backend(self): - """Get the actual backend. May be a module or an instance of - a class. Doesn't matter to us. We do this synchronized as it's - possible multiple greenthreads started very quickly trying to do - DB calls and eventlet can switch threads before self.__backend gets - assigned. 
- """ - if self.__backend: - # Another thread assigned it - return self.__backend backend_name = CONF.database.backend - self.__use_tpool = CONF.database.use_tpool - if self.__use_tpool: - from eventlet import tpool - self.__tpool = tpool # Import the untranslated name if we don't have a # mapping. - backend_path = self.__backend_mapping.get(backend_name, - backend_name) + backend_path = backend_mapping.get(backend_name, backend_name) backend_mod = importutils.import_module(backend_path) self.__backend = backend_mod.get_backend() - return self.__backend def __getattr__(self, key): - backend = self.__backend or self.__get_backend() - attr = getattr(backend, key) - if not self.__use_tpool or not hasattr(attr, '__call__'): - return attr - - def tpool_wrapper(*args, **kwargs): - return self.__tpool.execute(attr, *args, **kwargs) - - functools.update_wrapper(tpool_wrapper, attr) - return tpool_wrapper + return getattr(self.__backend, key) diff --git a/storyboard/openstack/common/db/exception.py b/storyboard/openstack/common/db/exception.py index e1950c24..7ec6d6cd 100644 --- a/storyboard/openstack/common/db/exception.py +++ b/storyboard/openstack/common/db/exception.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -18,7 +16,7 @@ """DB related custom exceptions.""" -from storyboard.openstack.common.gettextutils import _ # noqa +from storyboard.openstack.common.gettextutils import _ class DBError(Exception): @@ -49,3 +47,8 @@ class DbMigrationError(DBError): """Wraps migration specific exception.""" def __init__(self, message=None): super(DbMigrationError, self).__init__(str(message)) + + +class DBConnectionError(DBError): + """Wraps connection specific exception.""" + pass diff --git a/storyboard/openstack/common/db/sqlalchemy/__init__.py b/storyboard/openstack/common/db/sqlalchemy/__init__.py index 1b9b60de..e69de29b 100644 --- a/storyboard/openstack/common/db/sqlalchemy/__init__.py +++ b/storyboard/openstack/common/db/sqlalchemy/__init__.py @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Cloudscaling Group, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/storyboard/openstack/common/db/sqlalchemy/migration.py b/storyboard/openstack/common/db/sqlalchemy/migration.py index ee94c0fb..5282ecb8 100644 --- a/storyboard/openstack/common/db/sqlalchemy/migration.py +++ b/storyboard/openstack/common/db/sqlalchemy/migration.py @@ -36,53 +36,25 @@ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
-import distutils.version as dist_version import os import re -import migrate from migrate.changeset import ansisql from migrate.changeset.databases import sqlite -from migrate.versioning import util as migrate_util +from migrate import exceptions as versioning_exceptions +from migrate.versioning import api as versioning_api +from migrate.versioning.repository import Repository import sqlalchemy from sqlalchemy.schema import UniqueConstraint from storyboard.openstack.common.db import exception from storyboard.openstack.common.db.sqlalchemy import session as db_session -from storyboard.openstack.common.gettextutils import _ # noqa +from storyboard.openstack.common.gettextutils import _ -@migrate_util.decorator -def patched_with_engine(f, *a, **kw): - url = a[0] - engine = migrate_util.construct_engine(url, **kw) - - try: - kw['engine'] = engine - return f(*a, **kw) - finally: - if isinstance(engine, migrate_util.Engine) and engine is not url: - migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine) - engine.dispose() - - -# TODO(jkoelker) When migrate 0.7.3 is released and nova depends -# on that version or higher, this can be removed -MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3') -if (not hasattr(migrate, '__version__') or - dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION): - migrate_util.with_engine = patched_with_engine - - -# NOTE(jkoelker) Delay importing migrate until we are patched -from migrate import exceptions as versioning_exceptions -from migrate.versioning import api as versioning_api -from migrate.versioning.repository import Repository - -_REPOSITORY = None - get_engine = db_session.get_engine @@ -220,6 +192,7 @@ def db_sync(abs_path, version=None, init_version=0): current_version = db_version(abs_path, init_version) repository = _find_migrate_repo(abs_path) + _db_schema_sanity_check() if version is None or version > current_version: return versioning_api.upgrade(get_engine(), repository, version) else: @@ -227,6 +200,22 @@ def db_sync(abs_path, version=None, init_version=0): version) +def _db_schema_sanity_check(): + engine = get_engine() + if engine.name == 'mysql': + onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION ' + 'from information_schema.TABLES ' + 'where TABLE_SCHEMA=%s and ' + 'TABLE_COLLATION NOT LIKE "%%utf8%%"') + + table_names = [res[0] for res in engine.execute(onlyutf8_sql, + engine.url.database)] + if len(table_names) > 0: + raise ValueError(_('Tables "%s" have non utf8 collation, ' + 'please make sure all tables are CHARSET=utf8' + ) % ','.join(table_names)) + + def db_version(abs_path, init_version): """Show the current version of the repository. @@ -241,14 +230,15 @@ def db_version(abs_path, init_version): engine = get_engine() meta.reflect(bind=engine) tables = meta.tables - if len(tables) == 0: + if len(tables) == 0 or 'alembic_version' in tables: db_version_control(abs_path, init_version) return versioning_api.db_version(get_engine(), repository) else: - # Some pre-Essex DB's may not be version controlled. - # Require them to upgrade using Essex first. raise exception.DbMigrationError( - message=_("Upgrade DB using Essex release first.")) + message=_( + "The database is not under version control, but has " + "tables. 
Please stamp the current version of the schema " + "manually.")) def db_version_control(abs_path, version=None): @@ -270,9 +260,6 @@ def _find_migrate_repo(abs_path): :param abs_path: Absolute path to migrate repository """ - global _REPOSITORY if not os.path.exists(abs_path): raise exception.DbMigrationError("Path %s not found" % abs_path) - if _REPOSITORY is None: - _REPOSITORY = Repository(abs_path) - return _REPOSITORY + return Repository(abs_path) diff --git a/storyboard/openstack/common/db/sqlalchemy/models.py b/storyboard/openstack/common/db/sqlalchemy/models.py index 71dbb331..4be9a848 100644 --- a/storyboard/openstack/common/db/sqlalchemy/models.py +++ b/storyboard/openstack/common/db/sqlalchemy/models.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. @@ -41,13 +39,13 @@ class ModelBase(object): if not session: session = sa.get_session() # NOTE(boris-42): This part of code should be look like: - # sesssion.add(self) + # session.add(self) # session.flush() # But there is a bug in sqlalchemy and eventlet that # raises NoneType exception if there is no running # transaction and rollback is called. As long as # sqlalchemy has this bug we have to create transaction - # explicity. + # explicitly. with session.begin(subtransactions=True): session.add(self) session.flush() @@ -61,7 +59,16 @@ class ModelBase(object): def get(self, key, default=None): return getattr(self, key, default) - def _get_extra_keys(self): + @property + def _extra_keys(self): + """Specifies custom fields + + Subclasses can override this property to return a list + of custom fields that should be included in their dict + representation. + + For reference check tests/db/sqlalchemy/test_models.py + """ return [] def __iter__(self): @@ -69,7 +76,7 @@ class ModelBase(object): # NOTE(russellb): Allow models to specify other keys that can be looked # up, beyond the actual db columns. An example would be the 'name' # property for an Instance. - columns.extend(self._get_extra_keys()) + columns.extend(self._extra_keys) self._i = iter(columns) return self @@ -91,12 +98,12 @@ class ModelBase(object): joined = dict([(k, v) for k, v in six.iteritems(self.__dict__) if not k[0] == '_']) local.update(joined) - return local.iteritems() + return six.iteritems(local) class TimestampMixin(object): - created_at = Column(DateTime, default=timeutils.utcnow) - updated_at = Column(DateTime, onupdate=timeutils.utcnow) + created_at = Column(DateTime, default=lambda: timeutils.utcnow()) + updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow()) class SoftDeleteMixin(object): diff --git a/storyboard/openstack/common/db/sqlalchemy/provision.py b/storyboard/openstack/common/db/sqlalchemy/provision.py new file mode 100644 index 00000000..f10eef28 --- /dev/null +++ b/storyboard/openstack/common/db/sqlalchemy/provision.py @@ -0,0 +1,187 @@ +# Copyright 2013 Mirantis.inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provision test environment for specific DB backends""" + +import argparse +import os +import random +import string + +from six import moves +import sqlalchemy + +from storyboard.openstack.common.db import exception as exc + + +SQL_CONNECTION = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://') + + +def _gen_credentials(*names): + """Generate credentials.""" + auth_dict = {} + for name in names: + val = ''.join(random.choice(string.ascii_lowercase) + for i in moves.range(10)) + auth_dict[name] = val + return auth_dict + + +def _get_engine(uri=SQL_CONNECTION): + """Engine creation + + By default the uri is SQL_CONNECTION which is admin credentials. + Call the function without arguments to get admin connection. Admin + connection required to create temporary user and database for each + particular test. Otherwise use existing connection to recreate connection + to the temporary database. + """ + return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool) + + +def _execute_sql(engine, sql, driver): + """Initialize connection, execute sql query and close it.""" + try: + with engine.connect() as conn: + if driver == 'postgresql': + conn.connection.set_isolation_level(0) + for s in sql: + conn.execute(s) + except sqlalchemy.exc.OperationalError: + msg = ('%s does not match database admin ' + 'credentials or database does not exist.') + raise exc.DBConnectionError(msg % SQL_CONNECTION) + + +def create_database(engine): + """Provide temporary user and database for each particular test.""" + driver = engine.name + + auth = _gen_credentials('database', 'user', 'passwd') + + sqls = { + 'mysql': [ + "drop database if exists %(database)s;", + "grant all on %(database)s.* to '%(user)s'@'localhost'" + " identified by '%(passwd)s';", + "create database %(database)s;", + ], + 'postgresql': [ + "drop database if exists %(database)s;", + "drop user if exists %(user)s;", + "create user %(user)s with password '%(passwd)s';", + "create database %(database)s owner %(user)s;", + ] + } + + if driver == 'sqlite': + return 'sqlite:////tmp/%s' % auth['database'] + + try: + sql_rows = sqls[driver] + except KeyError: + raise ValueError('Unsupported RDBMS %s' % driver) + sql_query = map(lambda x: x % auth, sql_rows) + + _execute_sql(engine, sql_query, driver) + + params = auth.copy() + params['backend'] = driver + return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params + + +def drop_database(engine, current_uri): + """Drop temporary database and user after each particular test.""" + engine = _get_engine(current_uri) + admin_engine = _get_engine() + driver = engine.name + auth = {'database': engine.url.database, 'user': engine.url.username} + + if driver == 'sqlite': + try: + os.remove(auth['database']) + except OSError: + pass + return + + sqls = { + 'mysql': [ + "drop database if exists %(database)s;", + "drop user '%(user)s'@'localhost';", + ], + 'postgresql': [ + "drop database if exists %(database)s;", + "drop user if exists %(user)s;", + ] + } + + try: + sql_rows = sqls[driver] + except KeyError: + raise ValueError('Unsupported RDBMS %s' % driver) + sql_query = 
map(lambda x: x % auth, sql_rows) + + _execute_sql(admin_engine, sql_query, driver) + + +def main(): + """Controller to handle commands + + ::create: Create test user and database with random names. + ::drop: Drop user and database created by previous command. + """ + parser = argparse.ArgumentParser( + description='Controller to handle database creation and dropping' + ' commands.', + epilog='Under normal circumstances this is not used directly.' + ' Used in .testr.conf to automate test database creation' + ' and dropping processes.') + subparsers = parser.add_subparsers( + help='Subcommands to manipulate temporary test databases.') + + create = subparsers.add_parser( + 'create', + help='Create temporary test ' + 'databases and users.') + create.set_defaults(which='create') + create.add_argument( + 'instances_count', + type=int, + help='Number of databases to create.') + + drop = subparsers.add_parser( + 'drop', + help='Drop temporary test databases and users.') + drop.set_defaults(which='drop') + drop.add_argument( + 'instances', + nargs='+', + help='List of database URIs to be dropped.') + + args = parser.parse_args() + + engine = _get_engine() + which = args.which + + if which == "create": + for i in range(int(args.instances_count)): + print(create_database(engine)) + elif which == "drop": + for db in args.instances: + drop_database(engine, db) + + +if __name__ == "__main__": + main() diff --git a/storyboard/openstack/common/db/sqlalchemy/session.py b/storyboard/openstack/common/db/sqlalchemy/session.py index 1e026bdc..d99af4d3 100644 --- a/storyboard/openstack/common/db/sqlalchemy/session.py +++ b/storyboard/openstack/common/db/sqlalchemy/session.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -23,7 +21,7 @@ Initializing: * Call set_defaults with the minimal of the following kwargs: sql_connection, sqlite_db - Example: + Example:: session.set_defaults( sql_connection="sqlite:///var/lib/storyboard/sqlite.db", @@ -44,17 +42,17 @@ Recommended ways to use sessions within this framework: functionality should be handled at a logical level. For an example, look at the code around quotas and reservation_rollback(). - Examples: + Examples:: def get_foo(context, foo): - return model_query(context, models.Foo).\ - filter_by(foo=foo).\ - first() + return (model_query(context, models.Foo). + filter_by(foo=foo). + first()) def update_foo(context, id, newfoo): - model_query(context, models.Foo).\ - filter_by(id=id).\ - update({'foo': newfoo}) + (model_query(context, models.Foo). + filter_by(id=id). + update({'foo': newfoo})) def create_foo(context, values): foo_ref = models.Foo() @@ -68,14 +66,21 @@ Recommended ways to use sessions within this framework: handler will take care of calling flush() and commit() for you. If using this approach, you should not explicitly call flush() or commit(). Any error within the context of the session will cause the session to emit - a ROLLBACK. If the connection is dropped before this is possible, the - database will implicitly rollback the transaction. + a ROLLBACK. Database errors like IntegrityError are raised in the + session's __exit__ handler, so a try/except inside the context managed + by the session will not catch them.
Likewise, catching non-database errors in + the session will not trigger the ROLLBACK, so exception handlers should + always be outside the session, unless the developer wants to do a partial + commit on purpose. If the connection is dropped before this is possible, + the database will implicitly roll back the transaction. Note: statements in the session scope will not be automatically retried. If you create models within the session, they need to be added, but you do not need to call model.save() + :: + def create_many_foo(context, foos): session = get_session() with session.begin(): @@ -87,33 +92,50 @@ Recommended ways to use sessions within this framework: def update_bar(context, foo_id, newbar): session = get_session() with session.begin(): - foo_ref = model_query(context, models.Foo, session).\ - filter_by(id=foo_id).\ - first() - model_query(context, models.Bar, session).\ - filter_by(id=foo_ref['bar_id']).\ - update({'bar': newbar}) + foo_ref = (model_query(context, models.Foo, session). + filter_by(id=foo_id). + first()) + (model_query(context, models.Bar, session). + filter_by(id=foo_ref['bar_id']). + update({'bar': newbar})) Note: update_bar is a trivially simple example of using "with session.begin". Whereas create_many_foo is a good example of when a transaction is needed, it is always best to use as few queries as possible. The two queries in update_bar can be better expressed using a single query which avoids - the need for an explicit transaction. It can be expressed like so: + the need for an explicit transaction. It can be expressed like so:: def update_bar(context, foo_id, newbar): - subq = model_query(context, models.Foo.id).\ - filter_by(id=foo_id).\ - limit(1).\ - subquery() - model_query(context, models.Bar).\ - filter_by(id=subq.as_scalar()).\ - update({'bar': newbar}) + subq = (model_query(context, models.Foo.id). + filter_by(id=foo_id). + limit(1). + subquery()) + (model_query(context, models.Bar). + filter_by(id=subq.as_scalar()). + update({'bar': newbar})) - For reference, this emits approximagely the following SQL statement: + For reference, this emits approximately the following SQL statement:: UPDATE bar SET bar = ${newbar} WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1); + Note: create_duplicate_foo is a trivially simple example of catching an + exception while using "with session.begin". Here we create two instances + with the same primary key; the exception must be caught outside of the + context managed by a single session:: + + def create_duplicate_foo(context): + foo1 = models.Foo() + foo2 = models.Foo() + foo1.id = foo2.id = 1 + session = get_session() + try: + with session.begin(): + session.add(foo1) + session.add(foo2) + except exception.DBDuplicateEntry as e: + handle_error(e) + * Passing an active session between methods. Sessions should only be passed to private methods. The private method must use a subtransaction; otherwise SQLAlchemy will throw an error when you call session.begin() on an existing @@ -129,6 +151,8 @@ Recommended ways to use sessions within this framework: becomes less clear in this situation. When this is needed for code clarity, it should be clearly documented. + + :: + def myfunc(foo): session = get_session() with session.begin(): @@ -173,7 +197,7 @@ There are some things which it is best to avoid: Enabling soft deletes: * To use/enable soft-deletes, the SoftDeleteMixin must be added - to your model class.
For example:: class NovaBase(models.SoftDeleteMixin, models.ModelBase): pass @@ -181,14 +205,15 @@ Enabling soft deletes: Efficient use of soft deletes: -* There are two possible ways to mark a record as deleted: +* There are two possible ways to mark a record as deleted:: + model.soft_delete() and query.soft_delete(). model.soft_delete() method works with single already fetched entry. query.soft_delete() makes only one db request for all entries that correspond to query. -* In almost all cases you should use query.soft_delete(). Some examples: +* In almost all cases you should use query.soft_delete(). Some examples:: def soft_delete_bar(): count = model_query(BarModel).find(some_condition).soft_delete() @@ -199,9 +224,9 @@ Efficient use of soft deletes: if session is None: session = get_session() with session.begin(subtransactions=True): - count = model_query(BarModel).\ - find(some_condition).\ - soft_delete(synchronize_session=True) + count = (model_query(BarModel). + find(some_condition). + soft_delete(synchronize_session=True)) # Here synchronize_session is required, because we # don't know what is going on in outer session. if count == 0: @@ -211,6 +236,8 @@ Efficient use of soft deletes: you fetch a single record, work with it, and mark it as deleted in the same transaction. + :: + def soft_delete_bar_model(): session = get_session() with session.begin(): @@ -219,13 +246,13 @@ Efficient use of soft deletes: bar_ref.soft_delete(session=session) However, if you need to work with all entries that correspond to query and - then soft delete them you should use query.soft_delete() method: + then soft delete them you should use query.soft_delete() method:: def soft_delete_multi_models(): session = get_session() with session.begin(): - query = model_query(BarModel, session=session).\ - find(some_condition) + query = (model_query(BarModel, session=session). + find(some_condition)) model_refs = query.all() # Work with model_refs query.soft_delete(synchronize_session=False) @@ -236,6 +263,8 @@ Efficient use of soft deletes: which issues a single query. Using model.soft_delete(), as in the following example, is very inefficient. + :: + for bar_ref in bar_refs: bar_ref.soft_delete(session=session) # This will produce count(bar_refs) db requests. 
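Pulling the soft-delete guidance above into one sketch: the model mixes in SoftDeleteMixin, and the bulk path issues a single UPDATE via query.soft_delete(). The model name and condition are hypothetical, and model_query is the project-level helper the docstring examples assume::

    from storyboard.openstack.common.db.sqlalchemy import models
    from storyboard.openstack.common.db.sqlalchemy import session as db_session

    class BarModel(models.SoftDeleteMixin, models.ModelBase):
        pass  # a real model also maps columns to a table

    def soft_delete_bars(some_condition):
        session = db_session.get_session()
        with session.begin():
            # One UPDATE for every matching row; calling
            # model.soft_delete() in a loop would issue one query per row.
            return (model_query(BarModel, session=session).
                    filter(some_condition).
                    soft_delete(synchronize_session=False))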
@@ -249,24 +278,23 @@ import time from oslo.config import cfg import six from sqlalchemy import exc as sqla_exc -import sqlalchemy.interfaces from sqlalchemy.interfaces import PoolListener import sqlalchemy.orm from sqlalchemy.pool import NullPool, StaticPool from sqlalchemy.sql.expression import literal_column from storyboard.openstack.common.db import exception -from storyboard.openstack.common.gettextutils import _ # noqa +from storyboard.openstack.common.gettextutils import _ from storyboard.openstack.common import log as logging from storyboard.openstack.common import timeutils sqlite_db_opts = [ cfg.StrOpt('sqlite_db', default='storyboard.sqlite', - help='the filename to use with sqlite'), + help='The file name to use with SQLite'), cfg.BoolOpt('sqlite_synchronous', default=True, - help='If true, use synchronous mode for sqlite'), + help='If True, SQLite uses synchronous mode'), ] database_opts = [ @@ -276,6 +304,7 @@ database_opts = [ '../', '$sqlite_db')), help='The SQLAlchemy connection string used to connect to the ' 'database', + secret=True, deprecated_opts=[cfg.DeprecatedOpt('sql_connection', group='DEFAULT'), cfg.DeprecatedOpt('sql_connection', @@ -284,6 +313,7 @@ database_opts = [ group='sql'), ]), cfg.StrOpt('slave_connection', default='', + secret=True, help='The SQLAlchemy connection string used to connect to the ' 'slave database'), cfg.IntOpt('idle_timeout', @@ -291,8 +321,10 @@ database_opts = [ deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout', group='DEFAULT'), cfg.DeprecatedOpt('sql_idle_timeout', - group='DATABASE')], - help='timeout before idle sql connections are reaped'), + group='DATABASE'), + cfg.DeprecatedOpt('idle_timeout', + group='sql')], + help='Timeout before idle sql connections are reaped'), cfg.IntOpt('min_pool_size', default=1, deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size', @@ -315,7 +347,7 @@ database_opts = [ group='DEFAULT'), cfg.DeprecatedOpt('sql_max_retries', group='DATABASE')], - help='maximum db connection retries during startup. ' + help='Maximum db connection retries during startup. 
' '(setting -1 implies an infinite retry count)'), cfg.IntOpt('retry_interval', default=10, @@ -323,7 +355,7 @@ database_opts = [ group='DEFAULT'), cfg.DeprecatedOpt('reconnect_interval', group='DATABASE')], - help='interval between retries of opening a sql connection'), + help='Interval between retries of opening a sql connection'), cfg.IntOpt('max_overflow', default=None, deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow', @@ -409,8 +441,8 @@ class SqliteForeignKeysListener(PoolListener): dbapi_con.execute('pragma foreign_keys=ON') -def get_session(autocommit=True, expire_on_commit=False, - sqlite_fk=False, slave_session=False): +def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False, + slave_session=False, mysql_traditional_mode=False): """Return a SQLAlchemy session.""" global _MAKER global _SLAVE_MAKER @@ -420,7 +452,8 @@ def get_session(autocommit=True, expire_on_commit=False, maker = _SLAVE_MAKER if maker is None: - engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session) + engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session, + mysql_traditional_mode=mysql_traditional_mode) maker = get_maker(engine, autocommit, expire_on_commit) if slave_session: @@ -439,6 +472,11 @@ def get_session(autocommit=True, expire_on_commit=False, # 1 column - (IntegrityError) column c1 is not unique # N columns - (IntegrityError) column c1, c2, ..., N are not unique # +# sqlite since 3.7.16: +# 1 column - (IntegrityError) UNIQUE constraint failed: k1 +# +# N columns - (IntegrityError) UNIQUE constraint failed: k1, k2 +# # postgres: # 1 column - (IntegrityError) duplicate key value violates unique # constraint "users_c1_key" @@ -451,9 +489,10 @@ def get_session(autocommit=True, expire_on_commit=False, # N columns - (IntegrityError) (1062, "Duplicate entry 'values joined # with -' for key 'name_of_our_constraint'") _DUP_KEY_RE_DB = { - "sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"), - "postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"), - "mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$") + "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"), + re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")), + "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),), + "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),) } @@ -483,10 +522,14 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name): # SQLAlchemy can differ when using unicode() and accessing .message. # An audit across all three supported engines will be necessary to # ensure there are no regressions. 
- m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message) - if not m: + for pattern in _DUP_KEY_RE_DB[engine_name]: + match = pattern.match(integrity_error.message) + if match: + break + else: return - columns = m.group(1) + + columns = match.group(1) if engine_name == "sqlite": columns = columns.strip().split(", ") @@ -555,7 +598,8 @@ def _wrap_db_error(f): return _wrap -def get_engine(sqlite_fk=False, slave_engine=False): +def get_engine(sqlite_fk=False, slave_engine=False, + mysql_traditional_mode=False): """Return a SQLAlchemy engine.""" global _ENGINE global _SLAVE_ENGINE @@ -567,8 +611,8 @@ def get_engine(sqlite_fk=False, slave_engine=False): db_uri = CONF.database.slave_connection if engine is None: - engine = create_engine(db_uri, - sqlite_fk=sqlite_fk) + engine = create_engine(db_uri, sqlite_fk=sqlite_fk, + mysql_traditional_mode=mysql_traditional_mode) if slave_engine: _SLAVE_ENGINE = engine else: @@ -603,22 +647,39 @@ def _thread_yield(dbapi_con, con_record): time.sleep(0) -def _ping_listener(dbapi_conn, connection_rec, connection_proxy): - """Ensures that MySQL connections checked out of the pool are alive. +def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy): + """Ensures that MySQL and DB2 connections are alive. Borrowed from: http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f """ + cursor = dbapi_conn.cursor() try: - dbapi_conn.cursor().execute('select 1') - except dbapi_conn.OperationalError as ex: - if ex.args[0] in (2006, 2013, 2014, 2045, 2055): - LOG.warn(_('Got mysql server has gone away: %s'), ex) - raise sqla_exc.DisconnectionError("Database server went away") + ping_sql = 'select 1' + if engine.name == 'ibm_db_sa': + # DB2 requires a table expression + ping_sql = 'select 1 from (values (1)) AS t1' + cursor.execute(ping_sql) + except Exception as ex: + if engine.dialect.is_disconnect(ex, dbapi_conn, cursor): + msg = _('Database server has gone away: %s') % ex + LOG.warning(msg) + raise sqla_exc.DisconnectionError(msg) else: raise +def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy): + """Set engine mode to 'traditional'. + + Required to prevent silent truncation at insert or update operations + under MySQL. By default MySQL truncates an inserted string that is longer + than the declared field and issues only a warning, which is fraught with + data corruption. + """ + dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;") + + def _is_db_connection_error(args): """Return True if error in connecting to db.""" # NOTE(adam_g): This is currently MySQL specific and needs to be extended @@ -631,7 +692,8 @@ def _is_db_connection_error(args): return False -def create_engine(sql_connection, sqlite_fk=False): +def create_engine(sql_connection, sqlite_fk=False, + mysql_traditional_mode=False): """Return a new SQLAlchemy engine.""" # NOTE(geekinutah): At this point we could be connecting to the normal # db handle or the slave db handle.
Things like @@ -672,8 +734,16 @@ def create_engine(sql_connection, sqlite_fk=False): sqlalchemy.event.listen(engine, 'checkin', _thread_yield) - if 'mysql' in connection_dict.drivername: - sqlalchemy.event.listen(engine, 'checkout', _ping_listener) + if engine.name in ['mysql', 'ibm_db_sa']: + callback = functools.partial(_ping_listener, engine) + sqlalchemy.event.listen(engine, 'checkout', callback) + if mysql_traditional_mode: + sqlalchemy.event.listen(engine, 'checkout', _set_mode_traditional) + else: + LOG.warning(_("This application has not enabled MySQL traditional" + " mode, which means silent data corruption may" + " occur. Please encourage the application" + " developers to enable this mode.")) elif 'sqlite' in connection_dict.drivername: if not CONF.sqlite_synchronous: sqlalchemy.event.listen(engine, 'connect', @@ -695,7 +765,7 @@ def create_engine(sql_connection, sqlite_fk=False): remaining = 'infinite' while True: msg = _('SQL connection failed. %s attempts left.') - LOG.warn(msg % remaining) + LOG.warning(msg % remaining) if remaining != 'infinite': remaining -= 1 time.sleep(CONF.database.retry_interval) diff --git a/storyboard/openstack/common/db/sqlalchemy/test_migrations.py b/storyboard/openstack/common/db/sqlalchemy/test_migrations.py index 6f5ba7cf..515bbac2 100644 --- a/storyboard/openstack/common/db/sqlalchemy/test_migrations.py +++ b/storyboard/openstack/common/db/sqlalchemy/test_migrations.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010-2011 OpenStack Foundation # Copyright 2012-2013 IBM Corp. # All Rights Reserved. @@ -16,81 +14,52 @@ # License for the specific language governing permissions and limitations # under the License. - -import commands -import ConfigParser +import functools import os -import urlparse +import subprocess +import lockfile +from six import moves import sqlalchemy import sqlalchemy.exc -from storyboard.openstack.common import lockutils +from storyboard.openstack.common.db.sqlalchemy import utils +from storyboard.openstack.common.gettextutils import _ from storyboard.openstack.common import log as logging +from storyboard.openstack.common.py3kcompat import urlutils from storyboard.openstack.common import test LOG = logging.getLogger(__name__) -def _get_connect_string(backend, user, passwd, database): - """Get database connection - - Try to get a connection with a very specific set of values, if we get - these then we'll run the tests, otherwise they are skipped - """ - if backend == "postgres": - backend = "postgresql+psycopg2" - elif backend == "mysql": - backend = "mysql+mysqldb" - else: - raise Exception("Unrecognized backend: '%s'" % backend) - - return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" - % {'backend': backend, 'user': user, 'passwd': passwd, - 'database': database}) - - -def _is_backend_avail(backend, user, passwd, database): - try: - connect_uri = _get_connect_string(backend, user, passwd, database) - engine = sqlalchemy.create_engine(connect_uri) - connection = engine.connect() - except Exception: - # intentionally catch all to handle exceptions even if we don't - # have any backend code loaded. 
- return False - else: - connection.close() - engine.dispose() - return True - - def _have_mysql(user, passwd, database): present = os.environ.get('TEST_MYSQL_PRESENT') if present is None: - return _is_backend_avail('mysql', user, passwd, database) + return utils.is_backend_avail('mysql', user, passwd, database) return present.lower() in ('', 'true') def _have_postgresql(user, passwd, database): present = os.environ.get('TEST_POSTGRESQL_PRESENT') if present is None: - return _is_backend_avail('postgres', user, passwd, database) + return utils.is_backend_avail('postgres', user, passwd, database) return present.lower() in ('', 'true') -def get_db_connection_info(conn_pieces): - database = conn_pieces.path.strip('/') - loc_pieces = conn_pieces.netloc.split('@') - host = loc_pieces[1] - - auth_pieces = loc_pieces[0].split(':') - user = auth_pieces[0] - password = "" - if len(auth_pieces) > 1: - password = auth_pieces[1].strip() - - return (user, password, database, host) +def _set_db_lock(lock_path=None, lock_prefix=None): + def decorator(f): + @functools.wraps(f) + def wrapper(*args, **kwargs): + try: + path = lock_path or os.environ.get("STORYBOARD_LOCK_PATH") + lock = lockfile.FileLock(os.path.join(path, lock_prefix)) + with lock: + LOG.debug(_('Got lock "%s"') % f.__name__) + return f(*args, **kwargs) + finally: + LOG.debug(_('Lock released "%s"') % f.__name__) + return wrapper + return decorator class BaseMigrationTestCase(test.BaseTestCase): @@ -115,13 +84,13 @@ class BaseMigrationTestCase(test.BaseTestCase): # once. No need to re-run this on each test... LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH) if os.path.exists(self.CONFIG_FILE_PATH): - cp = ConfigParser.RawConfigParser() + cp = moves.configparser.RawConfigParser() try: cp.read(self.CONFIG_FILE_PATH) defaults = cp.defaults() for key, value in defaults.items(): self.test_databases[key] = value - except ConfigParser.ParsingError as e: + except moves.configparser.ParsingError as e: self.fail("Failed to read test_migrations.conf config " "file. 
Got error: %s" % e) else: @@ -143,14 +112,18 @@ class BaseMigrationTestCase(test.BaseTestCase): super(BaseMigrationTestCase, self).tearDown() def execute_cmd(self, cmd=None): - status, output = commands.getstatusoutput(cmd) + process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + output = process.communicate()[0] LOG.debug(output) - self.assertEqual(0, status, + self.assertEqual(0, process.returncode, "Failed to run: %s\n%s" % (cmd, output)) - @lockutils.synchronized('pgadmin', 'tests-', external=True) def _reset_pg(self, conn_pieces): - (user, password, database, host) = get_db_connection_info(conn_pieces) + (user, + password, + database, + host) = utils.get_db_connection_info(conn_pieces) os.environ['PGPASSWORD'] = password os.environ['PGUSER'] = user # note(boris-42): We must create and drop database, we can't @@ -170,10 +143,11 @@ class BaseMigrationTestCase(test.BaseTestCase): os.unsetenv('PGPASSWORD') os.unsetenv('PGUSER') + @_set_db_lock(lock_prefix='migration_tests-') def _reset_databases(self): for key, engine in self.engines.items(): conn_string = self.test_databases[key] - conn_pieces = urlparse.urlparse(conn_string) + conn_pieces = urlutils.urlparse(conn_string) engine.dispose() if conn_string.startswith('sqlite'): # We can just delete the SQLite database, which is @@ -188,7 +162,7 @@ class BaseMigrationTestCase(test.BaseTestCase): # the MYSQL database, which is easier and less error-prone # than using SQLAlchemy to do this via MetaData...trust me. (user, password, database, host) = \ - get_db_connection_info(conn_pieces) + utils.get_db_connection_info(conn_pieces) sql = ("drop database if exists %(db)s; " "create database %(db)s;") % {'db': database} cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s " diff --git a/storyboard/openstack/common/db/sqlalchemy/utils.py b/storyboard/openstack/common/db/sqlalchemy/utils.py index 3317ab7a..235131de 100644 --- a/storyboard/openstack/common/db/sqlalchemy/utils.py +++ b/storyboard/openstack/common/db/sqlalchemy/utils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010-2011 OpenStack Foundation. 
@@ -38,7 +36,7 @@ from sqlalchemy import String from sqlalchemy import Table from sqlalchemy.types import NullType -from storyboard.openstack.common.gettextutils import _ # noqa +from storyboard.openstack.common.gettextutils import _ from storyboard.openstack.common import log as logging from storyboard.openstack.common import timeutils @@ -96,7 +94,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None, if 'id' not in sort_keys: # TODO(justinsb): If this ever gives a false-positive, check # the actual primary key, rather than assuming its id - LOG.warn(_('Id not in sort_keys; is sort_keys unique?')) + LOG.warning(_('Id not in sort_keys; is sort_keys unique?')) assert(not (sort_dir and sort_dirs)) @@ -135,9 +133,9 @@ def paginate_query(query, model, limit, sort_keys, marker=None, # Build up an array of sort criteria as in the docstring criteria_list = [] - for i in range(0, len(sort_keys)): + for i in range(len(sort_keys)): crit_attrs = [] - for j in range(0, i): + for j in range(i): model_attr = getattr(model, sort_keys[j]) crit_attrs.append((model_attr == marker_values[j])) @@ -499,3 +497,50 @@ def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name, where(new_table.c.deleted == deleted).\ values(deleted=default_deleted_value).\ execute() + + +def get_connect_string(backend, user, passwd, database): + """Get database connection + + Try to get a connection with a very specific set of values, if we get + these then we'll run the tests, otherwise they are skipped + """ + if backend == "postgres": + backend = "postgresql+psycopg2" + elif backend == "mysql": + backend = "mysql+mysqldb" + else: + raise Exception("Unrecognized backend: '%s'" % backend) + + return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" + % {'backend': backend, 'user': user, 'passwd': passwd, + 'database': database}) + + +def is_backend_avail(backend, user, passwd, database): + try: + connect_uri = get_connect_string(backend, user, passwd, database) + engine = sqlalchemy.create_engine(connect_uri) + connection = engine.connect() + except Exception: + # intentionally catch all to handle exceptions even if we don't + # have any backend code loaded. + return False + else: + connection.close() + engine.dispose() + return True + + +def get_db_connection_info(conn_pieces): + database = conn_pieces.path.strip('/') + loc_pieces = conn_pieces.netloc.split('@') + host = loc_pieces[1] + + auth_pieces = loc_pieces[0].split(':') + user = auth_pieces[0] + password = "" + if len(auth_pieces) > 1: + password = auth_pieces[1].strip() + + return (user, password, database, host) diff --git a/storyboard/openstack/common/excutils.py b/storyboard/openstack/common/excutils.py index f1cbe96b..f2927423 100644 --- a/storyboard/openstack/common/excutils.py +++ b/storyboard/openstack/common/excutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # Copyright 2012, Red Hat, Inc. # @@ -26,7 +24,7 @@ import traceback import six -from storyboard.openstack.common.gettextutils import _ # noqa +from storyboard.openstack.common.gettextutils import _ class save_and_reraise_exception(object): @@ -44,13 +42,13 @@ class save_and_reraise_exception(object): In some cases the caller may not want to re-raise the exception, and for those circumstances this context provides a reraise flag that - can be used to suppress the exception. For example: + can be used to suppress the exception. 
For example:: - except Exception: - with save_and_reraise_exception() as ctxt: - decide_if_need_reraise() - if not should_be_reraised: - ctxt.reraise = False + except Exception: + with save_and_reraise_exception() as ctxt: + decide_if_need_reraise() + if not should_be_reraised: + ctxt.reraise = False """ def __init__(self): self.reraise = True diff --git a/storyboard/openstack/common/fileutils.py b/storyboard/openstack/common/fileutils.py index 91b96a42..6d6f52a6 100644 --- a/storyboard/openstack/common/fileutils.py +++ b/storyboard/openstack/common/fileutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -15,14 +13,13 @@ # License for the specific language governing permissions and limitations # under the License. - import contextlib import errno import os import tempfile from storyboard.openstack.common import excutils -from storyboard.openstack.common.gettextutils import _ # noqa +from storyboard.openstack.common.gettextutils import _ from storyboard.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/storyboard/openstack/common/fixture/__init__.py b/storyboard/openstack/common/fixture/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/storyboard/openstack/common/fixture/config.py b/storyboard/openstack/common/fixture/config.py new file mode 100644 index 00000000..4b0efd2f --- /dev/null +++ b/storyboard/openstack/common/fixture/config.py @@ -0,0 +1,46 @@ +# +# Copyright 2013 Mirantis, Inc. +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +from oslo.config import cfg +import six + + +class Config(fixtures.Fixture): + """Override some configuration values. + + The keyword arguments are the names of configuration options to + override and their values. + + If a group argument is supplied, the overrides are applied to + the specified configuration option group. + + All overrides are automatically cleared at the end of the current + test by the reset() method, which is registered by addCleanup(). + """ + + def __init__(self, conf=cfg.CONF): + self.conf = conf + + def setUp(self): + super(Config, self).setUp() + self.addCleanup(self.conf.reset) + + def config(self, **kw): + group = kw.pop('group', None) + for k, v in six.iteritems(kw): + self.conf.set_override(k, v, group) diff --git a/storyboard/openstack/common/fixture/lockutils.py b/storyboard/openstack/common/fixture/lockutils.py new file mode 100644 index 00000000..25fc370e --- /dev/null +++ b/storyboard/openstack/common/fixture/lockutils.py @@ -0,0 +1,51 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures + +from storyboard.openstack.common import lockutils + + +class LockFixture(fixtures.Fixture): + """External locking fixture. + + This fixture is an alternative to the synchronized decorator with + the external flag, so that tearDowns and addCleanups are included in + the lock context when locking between tests. The fixture is recommended to + be the first line in a test method, like so:: + + def test_method(self): + self.useFixture(LockFixture('lock_name')) + ... + + or the first line in setUp if all the test methods in the class are + required to be serialized. Something like:: + + class TestCase(testtools.TestCase): + def setUp(self): + self.useFixture(LockFixture('lock_name')) + super(TestCase, self).setUp() + ... + + This is because addCleanups are put on a LIFO queue that gets run after the + test method exits (either by completing or by raising an exception). + """ + def __init__(self, name, lock_file_prefix=None): + self.mgr = lockutils.lock(name, lock_file_prefix, True) + + def setUp(self): + super(LockFixture, self).setUp() + self.addCleanup(self.mgr.__exit__, None, None, None) + self.mgr.__enter__() diff --git a/storyboard/openstack/common/fixture/mockpatch.py b/storyboard/openstack/common/fixture/mockpatch.py new file mode 100644 index 00000000..a8ffeb37 --- /dev/null +++ b/storyboard/openstack/common/fixture/mockpatch.py @@ -0,0 +1,51 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
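The new fixture modules are designed to compose inside one test case. A sketch of typical usage (the lock name and the overridden option are illustrative, and the option must already be registered on cfg.CONF)::

    import testtools

    from storyboard.openstack.common.fixture import config
    from storyboard.openstack.common.fixture import lockutils

    class ExampleTestCase(testtools.TestCase):
        def setUp(self):
            # Serialize all tests in this class around one external lock.
            self.useFixture(lockutils.LockFixture('example-lock'))
            super(ExampleTestCase, self).setUp()
            # Override a registered option; cfg.CONF.reset() is queued
            # as a cleanup, so the override disappears after the test.
            self.config_fixture = self.useFixture(config.Config())
            self.config_fixture.config(verbose=True)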
+ +import fixtures +import mock + + +class PatchObject(fixtures.Fixture): + """Deal with code around mock.""" + + def __init__(self, obj, attr, new=mock.DEFAULT, **kwargs): + self.obj = obj + self.attr = attr + self.kwargs = kwargs + self.new = new + + def setUp(self): + super(PatchObject, self).setUp() + _p = mock.patch.object(self.obj, self.attr, self.new, **self.kwargs) + self.mock = _p.start() + self.addCleanup(_p.stop) + + +class Patch(fixtures.Fixture): + + """Deal with code around mock.patch.""" + + def __init__(self, obj, new=mock.DEFAULT, **kwargs): + self.obj = obj + self.kwargs = kwargs + self.new = new + + def setUp(self): + super(Patch, self).setUp() + _p = mock.patch(self.obj, self.new, **self.kwargs) + self.mock = _p.start() + self.addCleanup(_p.stop) diff --git a/storyboard/openstack/common/fixture/moxstubout.py b/storyboard/openstack/common/fixture/moxstubout.py new file mode 100644 index 00000000..e8c031f0 --- /dev/null +++ b/storyboard/openstack/common/fixture/moxstubout.py @@ -0,0 +1,32 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +import mox + + +class MoxStubout(fixtures.Fixture): + """Deal with code around mox and stubout as a fixture.""" + + def setUp(self): + super(MoxStubout, self).setUp() + # emulate some of the mox stuff, we can't use the metaclass + # because it screws with our generators + self.mox = mox.Mox() + self.stubs = self.mox.stubs + self.addCleanup(self.mox.UnsetStubs) + self.addCleanup(self.mox.VerifyAll) diff --git a/storyboard/openstack/common/gettextutils.py b/storyboard/openstack/common/gettextutils.py index 700d9588..9bbb7bdc 100644 --- a/storyboard/openstack/common/gettextutils.py +++ b/storyboard/openstack/common/gettextutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2012 Red Hat, Inc. # Copyright 2013 IBM Corp. # All Rights Reserved. @@ -26,13 +24,10 @@ Usual usage in an openstack.common module: import copy import gettext -import logging +import locale +from logging import handlers import os import re -try: - import UserString as _userString -except ImportError: - import collections as _userString from babel import localedata import six @@ -58,7 +53,7 @@ def enable_lazy(): def _(msg): if USE_LAZY: - return Message(msg, 'storyboard') + return Message(msg, domain='storyboard') else: if six.PY3: return _t.gettext(msg) @@ -90,11 +85,6 @@ def install(domain, lazy=False): # messages in OpenStack. We override the standard _() function # and % (format string) operation to build Message objects that can # later be translated when we have more information. - # - # Also included below is an example LocaleHandler that translates - # Messages to an associated locale, effectively allowing many logs, - # each with their own locale. 
- def _lazy_gettext(msg): """Create and return a Message object. @@ -105,7 +95,7 @@ def install(domain, lazy=False): Message encapsulates a string so that we can translate it later when needed. """ - return Message(msg, domain) + return Message(msg, domain=domain) from six import moves moves.builtins.__dict__['_'] = _lazy_gettext @@ -120,182 +110,169 @@ def install(domain, lazy=False): unicode=True) -class Message(_userString.UserString, object): - """Class used to encapsulate translatable messages.""" - def __init__(self, msg, domain): - # _msg is the gettext msgid and should never change - self._msg = msg - self._left_extra_msg = '' - self._right_extra_msg = '' - self._locale = None - self.params = None - self.domain = domain +class Message(six.text_type): + """A Message object is a unicode object that can be translated. - @property - def data(self): - # NOTE(mrodden): this should always resolve to a unicode string - # that best represents the state of the message currently + Translation of Message is done explicitly using the translate() method. + For all non-translation intents and purposes, a Message is simply unicode, + and can be treated as such. + """ - localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR') - if self.locale: - lang = gettext.translation(self.domain, - localedir=localedir, - languages=[self.locale], - fallback=True) - else: - # use system locale for translations - lang = gettext.translation(self.domain, - localedir=localedir, - fallback=True) + def __new__(cls, msgid, msgtext=None, params=None, + domain='storyboard', *args): + """Create a new Message object. + In order for translation to work gettext requires a message ID, this + msgid will be used as the base unicode text. It is also possible + for the msgid and the base unicode text to be different by passing + the msgtext parameter. + """ + # If the base msgtext is not given, we use the default translation + # of the msgid (which is in English) just in case the system locale is + # not English, so that the base text will be in that locale by default. + if not msgtext: + msgtext = Message._translate_msgid(msgid, domain) + # We want to initialize the parent unicode with the actual object that + # would have been plain unicode if 'Message' was not enabled. + msg = super(Message, cls).__new__(cls, msgtext) + msg.msgid = msgid + msg.domain = domain + msg.params = params + return msg + + def translate(self, desired_locale=None): + """Translate this message to the desired locale. + + :param desired_locale: The desired locale to translate the message to, + if no locale is provided the message will be + translated to the system's default locale. + + :returns: the translated message in unicode + """ + + translated_message = Message._translate_msgid(self.msgid, + self.domain, + desired_locale) + if self.params is None: + # No need for more translation + return translated_message + + # This Message object may have been formatted with one or more + # Message objects as substitution arguments, given either as a single + # argument, part of a tuple, or as one or more values in a dictionary. 
+ # When translating this Message we need to translate those Messages too + translated_params = _translate_args(self.params, desired_locale) + + translated_message = translated_message % translated_params + + return translated_message + + @staticmethod + def _translate_msgid(msgid, domain, desired_locale=None): + if not desired_locale: + system_locale = locale.getdefaultlocale() + # If the system locale is not available to the runtime use English + if not system_locale[0]: + desired_locale = 'en_US' + else: + desired_locale = system_locale[0] + + locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR') + lang = gettext.translation(domain, + localedir=locale_dir, + languages=[desired_locale], + fallback=True) if six.PY3: - ugettext = lang.gettext + translator = lang.gettext else: - ugettext = lang.ugettext + translator = lang.ugettext - full_msg = (self._left_extra_msg + - ugettext(self._msg) + - self._right_extra_msg) + translated_message = translator(msgid) + return translated_message - if self.params is not None: - full_msg = full_msg % self.params + def __mod__(self, other): + # When we mod a Message we want the actual operation to be performed + # by the parent class (i.e. unicode()), the only thing we do here is + # save the original msgid and the parameters in case of a translation + params = self._sanitize_mod_params(other) + unicode_mod = super(Message, self).__mod__(params) + modded = Message(self.msgid, + msgtext=unicode_mod, + params=params, + domain=self.domain) + return modded - return six.text_type(full_msg) + def _sanitize_mod_params(self, other): + """Sanitize the object being modded with this Message. - @property - def locale(self): - return self._locale + - Add support for modding 'None' so translation supports it + - Trim the modded object, which can be a large dictionary, to only + those keys that would actually be used in a translation + - Snapshot the object being modded, in case the message is + translated, it will be used as it was when the Message was created + """ + if other is None: + params = (other,) + elif isinstance(other, dict): + params = self._trim_dictionary_parameters(other) + else: + params = self._copy_param(other) + return params - @locale.setter - def locale(self, value): - self._locale = value - if not self.params: - return + def _trim_dictionary_parameters(self, dict_param): + """Return a dict that only has matching entries in the msgid.""" + # NOTE(luisg): Here we trim down the dictionary passed as parameters + # to avoid carrying a lot of unnecessary weight around in the message + # object, for example if someone passes in Message() % locals() but + # only some params are used, and additionally we prevent errors for + # non-deepcopyable objects by unicoding() them. - # This Message object may have been constructed with one or more - # Message objects as substitution parameters, given as a single - # Message, or a tuple or Map containing some, so when setting the - # locale for this Message we need to set it for those Messages too. 
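To make the new Message semantics concrete, a small usage sketch (the message string is illustrative; translate() only changes the text when catalogs for the domain are actually installed):

    from storyboard.openstack.common import gettextutils

    # With lazy translation enabled, _() returns Message instances
    # rather than plain unicode.
    gettextutils.enable_lazy()

    msg = gettextutils._('Volume %(id)s not found') % {'id': 'vol-1'}

    # A Message behaves like unicode until a locale is requested;
    # substitution parameters that are themselves Messages are
    # translated along with it.
    spanish = msg.translate('es')
    default = msg.translate()  # system default locale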
- if isinstance(self.params, Message): - self.params.locale = value - return - if isinstance(self.params, tuple): - for param in self.params: - if isinstance(param, Message): - param.locale = value - return - if isinstance(self.params, dict): - for param in self.params.values(): - if isinstance(param, Message): - param.locale = value + # Look for %(param) keys in msgid; + # Skip %% and deal with the case where % is first character on the line + keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid) - def _save_dictionary_parameter(self, dict_param): - full_msg = self.data - # look for %(blah) fields in string; - # ignore %% and deal with the - # case where % is first character on the line - keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', full_msg) - - # if we don't find any %(blah) blocks but have a %s - if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg): - # apparently the full dictionary is the parameter - params = copy.deepcopy(dict_param) + # If we don't find any %(param) keys but have a %s + if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid): + # Apparently the full dictionary is the parameter + params = self._copy_param(dict_param) else: params = {} + # Save our existing parameters as defaults to protect + # ourselves from losing values if we are called through an + # (erroneous) chain that builds a valid Message with + # arguments, and then does something like "msg % kwds" + # where kwds is an empty dictionary. + src = {} + if isinstance(self.params, dict): + src.update(self.params) + src.update(dict_param) for key in keys: - try: - params[key] = copy.deepcopy(dict_param[key]) - except TypeError: - # cast uncopyable thing to unicode string - params[key] = six.text_type(dict_param[key]) + params[key] = self._copy_param(src[key]) return params - def _save_parameters(self, other): - # we check for None later to see if - # we actually have parameters to inject, - # so encapsulate if our parameter is actually None - if other is None: - self.params = (other, ) - elif isinstance(other, dict): - self.params = self._save_dictionary_parameter(other) - else: - # fallback to casting to unicode, - # this will handle the problematic python code-like - # objects that cannot be deep-copied - try: - self.params = copy.deepcopy(other) - except TypeError: - self.params = six.text_type(other) + def _copy_param(self, param): + try: + return copy.deepcopy(param) + except TypeError: + # Fallback to casting to unicode this will handle the + # python code-like objects that can't be deep-copied + return six.text_type(param) - return self - - # overrides to be more string-like - def __unicode__(self): - return self.data - - def __str__(self): - if six.PY3: - return self.__unicode__() - return self.data.encode('utf-8') - - def __getstate__(self): - to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg', - 'domain', 'params', '_locale'] - new_dict = self.__dict__.fromkeys(to_copy) - for attr in to_copy: - new_dict[attr] = copy.deepcopy(self.__dict__[attr]) - - return new_dict - - def __setstate__(self, state): - for (k, v) in state.items(): - setattr(self, k, v) - - # operator overloads def __add__(self, other): - copied = copy.deepcopy(self) - copied._right_extra_msg += other.__str__() - return copied + msg = _('Message objects do not support addition.') + raise TypeError(msg) def __radd__(self, other): - copied = copy.deepcopy(self) - copied._left_extra_msg += other.__str__() - return copied + return self.__add__(other) - def __mod__(self, other): - # do a format string to catch and raise 
- # any possible KeyErrors from missing parameters - self.data % other - copied = copy.deepcopy(self) - return copied._save_parameters(other) - - def __mul__(self, other): - return self.data * other - - def __rmul__(self, other): - return other * self.data - - def __getitem__(self, key): - return self.data[key] - - def __getslice__(self, start, end): - return self.data.__getslice__(start, end) - - def __getattribute__(self, name): - # NOTE(mrodden): handle lossy operations that we can't deal with yet - # These override the UserString implementation, since UserString - # uses our __class__ attribute to try and build a new message - # after running the inner data string through the operation. - # At that point, we have lost the gettext message id and can just - # safely resolve to a string instead. - ops = ['capitalize', 'center', 'decode', 'encode', - 'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip', - 'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill'] - if name in ops: - return getattr(self.data, name) - else: - return _userString.UserString.__getattribute__(self, name) + def __str__(self): + # NOTE(luisg): Logging in python 2.6 tries to str() log records, + # and it expects specifically a UnicodeError in order to proceed. + msg = _('Message objects do not support str() because they may ' + 'contain non-ascii characters. ' + 'Please use unicode() or translate() instead.') + raise UnicodeError(msg) def get_available_languages(domain): @@ -321,53 +298,143 @@ def get_available_languages(domain): list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() + for i in locale_identifiers: if find(i) is not None: language_list.append(i) + + # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported + # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they + # are perfectly legitimate locales: + # https://github.com/mitsuhiko/babel/issues/37 + # In Babel 1.3 they fixed the bug and they support these locales, but + # they are still not explicitly "listed" by locale_identifiers(). + # That is why we add the locales here explicitly if necessary so that + # they are listed as supported. + aliases = {'zh': 'zh_CN', + 'zh_Hant_HK': 'zh_HK', + 'zh_Hant': 'zh_TW', + 'fil': 'tl_PH'} + for (locale, alias) in six.iteritems(aliases): + if locale in language_list and alias not in language_list: + language_list.append(alias) + _AVAILABLE_LANGUAGES[domain] = language_list return copy.copy(language_list) -def get_localized_message(message, user_locale): - """Gets a localized version of the given message in the given locale. +def translate(obj, desired_locale=None): + """Gets the translated unicode representation of the given object. - If the message is not a Message object the message is returned as-is. - If the locale is None the message is translated to the default locale. + If the object is not translatable it is returned as-is. + If the locale is None the object is translated to the system locale. 
- :returns: the translated message in unicode, or the original message if + :param obj: the object to translate + :param desired_locale: the locale to translate the message to, if None the + default system locale will be used + :returns: the translated object in unicode, or the original object if it could not be translated """ - translated = message + message = obj + if not isinstance(message, Message): + # If the object to translate is not already translatable, + # let's first get its unicode representation + message = six.text_type(obj) if isinstance(message, Message): - original_locale = message.locale - message.locale = user_locale - translated = six.text_type(message) - message.locale = original_locale - return translated + # Even after unicoding() we still need to check if we are + # running with translatable unicode before translating + return message.translate(desired_locale) + return obj -class LocaleHandler(logging.Handler): - """Handler that can have a locale associated to translate Messages. +def _translate_args(args, desired_locale=None): + """Translates all the translatable elements of the given arguments object. - A quick example of how to utilize the Message class above. - LocaleHandler takes a locale and a target logging.Handler object - to forward LogRecord objects to after translating the internal Message. + This method is used for translating the translatable values in method + arguments which include values of tuples or dictionaries. + If the object is not a tuple or a dictionary the object itself is + translated if it is translatable. + + If the locale is None the object is translated to the system locale. + + :param args: the args to translate + :param desired_locale: the locale to translate the args to, if None the + default system locale will be used + :returns: a new args object with the translated contents of the original + """ + if isinstance(args, tuple): + return tuple(translate(v, desired_locale) for v in args) + if isinstance(args, dict): + translated_dict = {} + for (k, v) in six.iteritems(args): + translated_v = translate(v, desired_locale) + translated_dict[k] = translated_v + return translated_dict + return translate(args, desired_locale) + + +class TranslationHandler(handlers.MemoryHandler): + """Handler that translates records before logging them. + + The TranslationHandler takes a locale and a target logging.Handler object + to forward LogRecord objects to after translating them. This handler + depends on Message objects being logged, instead of regular strings. + + The handler can be configured declaratively in the logging.conf as follows: + + [handlers] + keys = translatedlog, translator + + [handler_translatedlog] + class = handlers.WatchedFileHandler + args = ('/var/log/api-localized.log',) + formatter = context + + [handler_translator] + class = openstack.common.log.TranslationHandler + target = translatedlog + args = ('zh_CN',) + + If the specified locale is not available in the system, the handler will + log in the default locale. 
""" - def __init__(self, locale, target): - """Initialize a LocaleHandler + def __init__(self, locale=None, target=None): + """Initialize a TranslationHandler :param locale: locale to use for translating messages :param target: logging.Handler object to forward LogRecord objects to after translation """ - logging.Handler.__init__(self) + # NOTE(luisg): In order to allow this handler to be a wrapper for + # other handlers, such as a FileHandler, and still be able to + # configure it using logging.conf, this handler has to extend + # MemoryHandler because only the MemoryHandlers' logging.conf + # parsing is implemented such that it accepts a target handler. + handlers.MemoryHandler.__init__(self, capacity=0, target=target) self.locale = locale - self.target = target + + def setFormatter(self, fmt): + self.target.setFormatter(fmt) def emit(self, record): - if isinstance(record.msg, Message): - # set the locale and resolve to a string - record.msg.locale = self.locale + # We save the message from the original record to restore it + # after translation, so other handlers are not affected by this + original_msg = record.msg + original_args = record.args + + try: + self._translate_and_log_record(record) + finally: + record.msg = original_msg + record.args = original_args + + def _translate_and_log_record(self, record): + record.msg = translate(record.msg, self.locale) + + # In addition to translating the message, we also need to translate + # arguments that were passed to the log method that were not part + # of the main message e.g., log.info(_('Some message %s'), this_one)) + record.args = _translate_args(record.args, self.locale) self.target.emit(record) diff --git a/storyboard/openstack/common/importutils.py b/storyboard/openstack/common/importutils.py index 7a303f93..4fd9ae2b 100644 --- a/storyboard/openstack/common/importutils.py +++ b/storyboard/openstack/common/importutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # diff --git a/storyboard/openstack/common/jsonutils.py b/storyboard/openstack/common/jsonutils.py index 2f24f73e..0972c8d9 100644 --- a/storyboard/openstack/common/jsonutils.py +++ b/storyboard/openstack/common/jsonutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara @@ -41,8 +39,12 @@ import json try: import xmlrpclib except ImportError: - # NOTE(jd): xmlrpclib is not shipped with Python 3 - xmlrpclib = None + # NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3 + # however the function and object call signatures + # remained the same. This whole try/except block should + # be removed and replaced with a call to six.moves once + # six 1.4.2 is released. 
See http://bit.ly/1bqrVzu + import xmlrpc.client as xmlrpclib import six @@ -124,14 +126,14 @@ def to_primitive(value, convert_instances=False, convert_datetime=True, level=level, max_depth=max_depth) if isinstance(value, dict): - return dict((k, recursive(v)) for k, v in value.iteritems()) + return dict((k, recursive(v)) for k, v in six.iteritems(value)) elif isinstance(value, (list, tuple)): return [recursive(lv) for lv in value] # It's not clear why xmlrpclib created their own DateTime type, but # for our purposes, make it a datetime type which is explicitly # handled - if xmlrpclib and isinstance(value, xmlrpclib.DateTime): + if isinstance(value, xmlrpclib.DateTime): value = datetime.datetime(*tuple(value.timetuple())[:6]) if convert_datetime and isinstance(value, datetime.datetime): diff --git a/storyboard/openstack/common/local.py b/storyboard/openstack/common/local.py index e82f17d0..0819d5b9 100644 --- a/storyboard/openstack/common/local.py +++ b/storyboard/openstack/common/local.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # diff --git a/storyboard/openstack/common/lockutils.py b/storyboard/openstack/common/lockutils.py index 06d94da6..7ab62b2b 100644 --- a/storyboard/openstack/common/lockutils.py +++ b/storyboard/openstack/common/lockutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -15,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. - import contextlib import errno import functools @@ -31,8 +28,7 @@ import weakref from oslo.config import cfg from storyboard.openstack.common import fileutils -from storyboard.openstack.common.gettextutils import _ # noqa -from storyboard.openstack.common import local +from storyboard.openstack.common.gettextutils import _ from storyboard.openstack.common import log as logging @@ -43,7 +39,7 @@ util_opts = [ cfg.BoolOpt('disable_process_locking', default=False, help='Whether to disable inter-process locks'), cfg.StrOpt('lock_path', - default=os.environ.get("STORIES_LOCK_PATH"), + default=os.environ.get("STORYBOARD_LOCK_PATH"), help=('Directory to use for lock files.')) ] @@ -78,7 +74,13 @@ class _InterProcessLock(object): self.lockfile = None self.fname = name - def __enter__(self): + def acquire(self): + basedir = os.path.dirname(self.fname) + + if not os.path.exists(basedir): + fileutils.ensure_tree(basedir) + LOG.info(_('Created lock path: %s'), basedir) + self.lockfile = open(self.fname, 'w') while True: @@ -88,22 +90,37 @@ class _InterProcessLock(object): # Also upon reading the MSDN docs for locking(), it seems # to have a laughable 10 attempts "blocking" mechanism. 
self.trylock() - return self + LOG.debug(_('Got file lock "%s"'), self.fname) + return True except IOError as e: if e.errno in (errno.EACCES, errno.EAGAIN): # external locks synchronise things like iptables # updates - give it some time to prevent busy spinning time.sleep(0.01) else: - raise + raise threading.ThreadError(_("Unable to acquire lock on" + " `%(filename)s` due to" + " %(exception)s") % + { + 'filename': self.fname, + 'exception': e, + }) - def __exit__(self, exc_type, exc_val, exc_tb): + def __enter__(self): + self.acquire() + return self + + def release(self): try: self.unlock() self.lockfile.close() except IOError: LOG.exception(_("Could not release the acquired lock `%s`"), self.fname) + LOG.debug(_('Released file lock "%s"'), self.fname) + + def __exit__(self, exc_type, exc_val, exc_tb): + self.release() def trylock(self): raise NotImplementedError() @@ -139,26 +156,27 @@ _semaphores = weakref.WeakValueDictionary() _semaphores_lock = threading.Lock() -@contextlib.contextmanager -def lock(name, lock_file_prefix=None, external=False, lock_path=None): - """Context based lock +def external_lock(name, lock_file_prefix=None): + with internal_lock(name): + LOG.debug(_('Attempting to grab external lock "%(lock)s"'), + {'lock': name}) - This function yields a `threading.Semaphore` instance (if we don't use - eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is - True, in which case, it'll yield an InterProcessLock instance. + # NOTE(mikal): the lock name cannot contain directory + # separators + name = name.replace(os.sep, '_') + if lock_file_prefix: + sep = '' if lock_file_prefix.endswith('-') else '-' + name = '%s%s%s' % (lock_file_prefix, sep, name) - :param lock_file_prefix: The lock_file_prefix argument is used to provide - lock files on disk with a meaningful prefix. + if not CONF.lock_path: + raise cfg.RequiredOptError('lock_path') - :param external: The external keyword argument denotes whether this lock - should work across multiple processes. This means that if two different - workers both run a a method decorated with @synchronized('mylock', - external=True), only one of them will execute at a time. + lock_file_path = os.path.join(CONF.lock_path, name) - :param lock_path: The lock_path keyword argument is used to specify a - special location for external lock files to live. If nothing is set, then - CONF.lock_path is used as a default. 
- """ + return InterProcessLock(lock_file_path) + + +def internal_lock(name): with _semaphores_lock: try: sem = _semaphores[name] @@ -166,58 +184,35 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None): sem = threading.Semaphore() _semaphores[name] = sem - with sem: - LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name}) - - # NOTE(mikal): I know this looks odd - if not hasattr(local.strong_store, 'locks_held'): - local.strong_store.locks_held = [] - local.strong_store.locks_held.append(name) - - try: - if external and not CONF.disable_process_locking: - LOG.debug(_('Attempting to grab file lock "%(lock)s"'), - {'lock': name}) - - # We need a copy of lock_path because it is non-local - local_lock_path = lock_path or CONF.lock_path - if not local_lock_path: - raise cfg.RequiredOptError('lock_path') - - if not os.path.exists(local_lock_path): - fileutils.ensure_tree(local_lock_path) - LOG.info(_('Created lock path: %s'), local_lock_path) - - def add_prefix(name, prefix): - if not prefix: - return name - sep = '' if prefix.endswith('-') else '-' - return '%s%s%s' % (prefix, sep, name) - - # NOTE(mikal): the lock name cannot contain directory - # separators - lock_file_name = add_prefix(name.replace(os.sep, '_'), - lock_file_prefix) - - lock_file_path = os.path.join(local_lock_path, lock_file_name) - - try: - lock = InterProcessLock(lock_file_path) - with lock as lock: - LOG.debug(_('Got file lock "%(lock)s" at %(path)s'), - {'lock': name, 'path': lock_file_path}) - yield lock - finally: - LOG.debug(_('Released file lock "%(lock)s" at %(path)s'), - {'lock': name, 'path': lock_file_path}) - else: - yield sem - - finally: - local.strong_store.locks_held.remove(name) + LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name}) + return sem -def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): +@contextlib.contextmanager +def lock(name, lock_file_prefix=None, external=False): + """Context based lock + + This function yields a `threading.Semaphore` instance (if we don't use + eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is + True, in which case, it'll yield an InterProcessLock instance. + + :param lock_file_prefix: The lock_file_prefix argument is used to provide + lock files on disk with a meaningful prefix. + + :param external: The external keyword argument denotes whether this lock + should work across multiple processes. This means that if two different + workers both run a a method decorated with @synchronized('mylock', + external=True), only one of them will execute at a time. + """ + if external and not CONF.disable_process_locking: + lock = external_lock(name, lock_file_prefix) + else: + lock = internal_lock(name) + with lock: + yield lock + + +def synchronized(name, lock_file_prefix=None, external=False): """Synchronization decorator. 
Decorating a method like so:: @@ -245,7 +240,7 @@ def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): @functools.wraps(f) def inner(*args, **kwargs): try: - with lock(name, lock_file_prefix, external, lock_path): + with lock(name, lock_file_prefix, external): LOG.debug(_('Got semaphore / lock "%(function)s"'), {'function': f.__name__}) return f(*args, **kwargs) @@ -293,7 +288,7 @@ def main(argv): """ lock_dir = tempfile.mkdtemp() - os.environ["STORIES_LOCK_PATH"] = lock_dir + os.environ["STORYBOARD_LOCK_PATH"] = lock_dir try: ret_val = subprocess.call(argv[1:]) finally: diff --git a/storyboard/openstack/common/log.py b/storyboard/openstack/common/log.py index ea5b2ee8..ed3a88db 100644 --- a/storyboard/openstack/common/log.py +++ b/storyboard/openstack/common/log.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. @@ -43,7 +41,7 @@ from oslo.config import cfg import six from six import moves -from storyboard.openstack.common.gettextutils import _ # noqa +from storyboard.openstack.common.gettextutils import _ from storyboard.openstack.common import importutils from storyboard.openstack.common import jsonutils from storyboard.openstack.common import local @@ -117,10 +115,21 @@ logging_cli_opts = [ '--log-file paths'), cfg.BoolOpt('use-syslog', default=False, - help='Use syslog for logging.'), + help='Use syslog for logging. ' + 'Existing syslog format is DEPRECATED during I, ' + 'and then will be changed in J to honor RFC5424'), + cfg.BoolOpt('use-syslog-rfc-format', + # TODO(bogdando) remove or use True after existing + # syslog format deprecation in J + default=False, + help='(Optional) Use syslog rfc5424 format for logging. ' + 'If enabled, will add APP-NAME (RFC5424) before the ' + 'MSG part of the syslog message. 
The old format ' + 'without APP-NAME is deprecated in I, ' + 'and will be removed in J.'), cfg.StrOpt('syslog-log-facility', default='LOG_USER', - help='syslog facility to receive log lines') + help='Syslog facility to receive log lines') ] generic_log_opts = [ @@ -132,38 +141,38 @@ generic_log_opts = [ log_opts = [ cfg.StrOpt('logging_context_format_string', default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [%(request_id)s %(user)s %(tenant)s] ' + '%(name)s [%(request_id)s %(user_identity)s] ' '%(instance)s%(message)s', - help='format string to use for log messages with context'), + help='Format string to use for log messages with context'), cfg.StrOpt('logging_default_format_string', default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' '%(name)s [-] %(instance)s%(message)s', - help='format string to use for log messages without context'), + help='Format string to use for log messages without context'), cfg.StrOpt('logging_debug_format_suffix', default='%(funcName)s %(pathname)s:%(lineno)d', - help='data to append to log format when level is DEBUG'), + help='Data to append to log format when level is DEBUG'), cfg.StrOpt('logging_exception_prefix', default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' '%(instance)s', - help='prefix each line of exception output with this format'), + help='Prefix each line of exception output with this format'), cfg.ListOpt('default_log_levels', default=[ 'amqp=WARN', 'amqplib=WARN', 'boto=WARN', - 'keystone=INFO', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'iso8601=WARN', + 'requests.packages.urllib3.connectionpool=WARN' ], - help='list of logger=LEVEL pairs'), + help='List of logger=LEVEL pairs'), cfg.BoolOpt('publish_errors', default=False, - help='publish error events'), + help='Publish error events'), cfg.BoolOpt('fatal_deprecations', default=False, - help='make deprecations fatal'), + help='Make deprecations fatal'), # NOTE(mikal): there are two options here because sometimes we are handed # a full instance (and could include more information), and other times we @@ -238,10 +247,11 @@ def mask_password(message, secret="***"): """Replace password with 'secret' in message. :param message: The string which includes security information. - :param secret: value with which to replace passwords, defaults to "***". + :param secret: value with which to replace passwords. :returns: The unicode value of message with the password fields masked. 
For example: + >>> mask_password("'adminPass' : 'aaaaa'") "'adminPass' : '***'" >>> mask_password("'admin_pass' : 'aaaaa'") @@ -334,10 +344,12 @@ class ContextAdapter(BaseLoggerAdapter): elif instance_uuid: instance_extra = (CONF.instance_uuid_format % {'uuid': instance_uuid}) - extra.update({'instance': instance_extra}) + extra['instance'] = instance_extra - extra.update({"project": self.project}) - extra.update({"version": self.version}) + extra.setdefault('user_identity', kwargs.pop('user_identity', None)) + + extra['project'] = self.project + extra['version'] = self.version extra['extra'] = extra.copy() return msg, kwargs @@ -351,7 +363,7 @@ class JSONFormatter(logging.Formatter): def formatException(self, ei, strip_newlines=True): lines = traceback.format_exception(*ei) if strip_newlines: - lines = [itertools.ifilter( + lines = [moves.filter( lambda x: x, line.rstrip().splitlines()) for line in lines] lines = list(itertools.chain(*lines)) @@ -391,9 +403,11 @@ class JSONFormatter(logging.Formatter): def _create_logging_excepthook(product_name): def logging_excepthook(exc_type, value, tb): extra = {} - if CONF.verbose: + if CONF.verbose or CONF.debug: extra['exc_info'] = (exc_type, value, tb) - getLogger(product_name).critical(str(value), **extra) + getLogger(product_name).critical( + "".join(traceback.format_exception_only(exc_type, value)), + **extra) return logging_excepthook @@ -457,6 +471,17 @@ def _find_facility_from_conf(): return facility +class RFCSysLogHandler(logging.handlers.SysLogHandler): + def __init__(self, *args, **kwargs): + self.binary_name = _get_binary_name() + super(RFCSysLogHandler, self).__init__(*args, **kwargs) + + def format(self, record): + msg = super(RFCSysLogHandler, self).format(record) + msg = self.binary_name + ' ' + msg + return msg + + def _setup_logging_from_conf(): log_root = getLogger(None).logger for handler in log_root.handlers: @@ -464,8 +489,14 @@ def _setup_logging_from_conf(): if CONF.use_syslog: facility = _find_facility_from_conf() - syslog = logging.handlers.SysLogHandler(address='/dev/log', - facility=facility) + # TODO(bogdando) use the format provided by RFCSysLogHandler + # after existing syslog format deprecation in J + if CONF.use_syslog_rfc_format: + syslog = RFCSysLogHandler(address='/dev/log', + facility=facility) + else: + syslog = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) log_root.addHandler(syslog) logpath = _get_log_file_path() @@ -477,7 +508,7 @@ def _setup_logging_from_conf(): streamlog = ColorHandler() log_root.addHandler(streamlog) - elif not CONF.log_file: + elif not logpath: # pass sys.stdout as a positional argument # python2.6 calls the argument strm, in 2.7 it's stream streamlog = logging.StreamHandler(sys.stdout) @@ -543,7 +574,7 @@ class WritableLogger(object): self.level = level def write(self, msg): - self.logger.log(self.level, msg) + self.logger.log(self.level, msg.rstrip()) class ContextFormatter(logging.Formatter): @@ -561,7 +592,7 @@ class ContextFormatter(logging.Formatter): def format(self, record): """Uses contextstring if request_id is set, otherwise default.""" - # NOTE(sdague): default the fancier formating params + # NOTE(sdague): default the fancier formatting params # to an empty string so we don't throw an exception if # they get used for key in ('instance', 'color'): @@ -577,7 +608,7 @@ class ContextFormatter(logging.Formatter): CONF.logging_debug_format_suffix): self._fmt += " " + CONF.logging_debug_format_suffix - # Cache this on the record, Logger will respect our 
formated copy + # Cache this on the record, Logger will respect our formatted copy if record.exc_info: record.exc_text = self.formatException(record.exc_info, record) return logging.Formatter.format(self, record) diff --git a/storyboard/openstack/common/processutils.py b/storyboard/openstack/common/processutils.py index d4424759..27abcaa7 100644 --- a/storyboard/openstack/common/processutils.py +++ b/storyboard/openstack/common/processutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -19,6 +17,7 @@ System-level utilities and helper functions. """ +import errno import logging as stdlib_logging import os import random @@ -27,8 +26,9 @@ import signal from eventlet.green import subprocess from eventlet import greenthread +import six -from storyboard.openstack.common.gettextutils import _ # noqa +from storyboard.openstack.common.gettextutils import _ from storyboard.openstack.common import log as logging @@ -55,11 +55,18 @@ class ProcessExecutionError(Exception): self.description = description if description is None: - description = "Unexpected error while running command." + description = _("Unexpected error while running command.") if exit_code is None: exit_code = '-' - message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" - % (description, cmd, exit_code, stdout, stderr)) + message = _('%(description)s\n' + 'Command: %(cmd)s\n' + 'Exit code: %(exit_code)s\n' + 'Stdout: %(stdout)r\n' + 'Stderr: %(stderr)r') % {'description': description, + 'cmd': cmd, + 'exit_code': exit_code, + 'stdout': stdout, + 'stderr': stderr} super(ProcessExecutionError, self).__init__(message) @@ -82,7 +89,7 @@ def execute(*cmd, **kwargs): :param cmd: Passed to subprocess.Popen. :type cmd: string :param process_input: Send to opened process. - :type proces_input: string + :type process_input: string :param check_exit_code: Single bool, int, or list of allowed exit codes. Defaults to [0]. Raise :class:`ProcessExecutionError` unless @@ -135,8 +142,8 @@ def execute(*cmd, **kwargs): if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0: if not root_helper: raise NoRootWrapSpecified( - message=('Command requested root, but did not specify a root ' - 'helper.')) + message=_('Command requested root, but did not ' + 'specify a root helper.')) cmd = shlex.split(root_helper) + list(cmd) cmd = map(str, cmd) @@ -162,20 +169,28 @@ def execute(*cmd, **kwargs): preexec_fn=preexec_fn, shell=shell) result = None - if process_input is not None: - result = obj.communicate(process_input) - else: - result = obj.communicate() + for _i in six.moves.range(20): + # NOTE(russellb) 20 is an arbitrary number of retries to + # prevent any chance of looping forever here. 
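The caller-facing behaviour of execute() is unchanged by the retry loop introduced here. A usage sketch, with illustrative commands:

    from storyboard.openstack.common import processutils

    stdout, stderr = processutils.execute('ls', '-l', '/tmp')

    # Non-zero exit codes raise ProcessExecutionError unless they appear
    # in check_exit_code (a single bool, int, or list of allowed codes;
    # check_exit_code=False skips the check entirely).
    out, err = processutils.execute('grep', 'pattern', '/etc/hosts',
                                    check_exit_code=[0, 1])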
+ try: + if process_input is not None: + result = obj.communicate(process_input) + else: + result = obj.communicate() + except OSError as e: + if e.errno in (errno.EAGAIN, errno.EINTR): + continue + raise + break obj.stdin.close() # pylint: disable=E1101 _returncode = obj.returncode # pylint: disable=E1101 - if _returncode: - LOG.log(loglevel, _('Result was %s') % _returncode) - if not ignore_exit_code and _returncode not in check_exit_code: - (stdout, stderr) = result - raise ProcessExecutionError(exit_code=_returncode, - stdout=stdout, - stderr=stderr, - cmd=' '.join(cmd)) + LOG.log(loglevel, _('Result was %s') % _returncode) + if not ignore_exit_code and _returncode not in check_exit_code: + (stdout, stderr) = result + raise ProcessExecutionError(exit_code=_returncode, + stdout=stdout, + stderr=stderr, + cmd=' '.join(cmd)) return result except ProcessExecutionError: if not attempts: diff --git a/storyboard/openstack/common/py3kcompat/__init__.py b/storyboard/openstack/common/py3kcompat/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/storyboard/openstack/common/py3kcompat/urlutils.py b/storyboard/openstack/common/py3kcompat/urlutils.py new file mode 100644 index 00000000..84e457a4 --- /dev/null +++ b/storyboard/openstack/common/py3kcompat/urlutils.py @@ -0,0 +1,67 @@ +# +# Copyright 2013 Canonical Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +""" +Python2/Python3 compatibility layer for OpenStack +""" + +import six + +if six.PY3: + # python3 + import urllib.error + import urllib.parse + import urllib.request + + urlencode = urllib.parse.urlencode + urljoin = urllib.parse.urljoin + quote = urllib.parse.quote + quote_plus = urllib.parse.quote_plus + parse_qsl = urllib.parse.parse_qsl + unquote = urllib.parse.unquote + unquote_plus = urllib.parse.unquote_plus + urlparse = urllib.parse.urlparse + urlsplit = urllib.parse.urlsplit + urlunsplit = urllib.parse.urlunsplit + SplitResult = urllib.parse.SplitResult + + urlopen = urllib.request.urlopen + URLError = urllib.error.URLError + pathname2url = urllib.request.pathname2url +else: + # python2 + import urllib + import urllib2 + import urlparse + + urlencode = urllib.urlencode + quote = urllib.quote + quote_plus = urllib.quote_plus + unquote = urllib.unquote + unquote_plus = urllib.unquote_plus + + parse = urlparse + parse_qsl = parse.parse_qsl + urljoin = parse.urljoin + urlparse = parse.urlparse + urlsplit = parse.urlsplit + urlunsplit = parse.urlunsplit + SplitResult = parse.SplitResult + + urlopen = urllib2.urlopen + URLError = urllib2.URLError + pathname2url = urllib.pathname2url diff --git a/storyboard/openstack/common/test.py b/storyboard/openstack/common/test.py index b8f9a0ef..1165c9d1 100644 --- a/storyboard/openstack/common/test.py +++ b/storyboard/openstack/common/test.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. 
# @@ -17,11 +15,15 @@ """Common utilities used in testing""" +import logging import os import fixtures import testtools +_TRUE_VALUES = ('True', 'true', '1', 'yes') +_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s" + class BaseTestCase(testtools.TestCase): @@ -29,7 +31,7 @@ class BaseTestCase(testtools.TestCase): super(BaseTestCase, self).setUp() self._set_timeout() self._fake_output() - self.useFixture(fixtures.FakeLogger('storyboard.openstack.common')) + self._fake_logs() self.useFixture(fixtures.NestedTempfile()) self.useFixture(fixtures.TempHomeDir()) @@ -44,11 +46,26 @@ class BaseTestCase(testtools.TestCase): self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) def _fake_output(self): - if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or - os.environ.get('OS_STDOUT_CAPTURE') == '1'): + if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES: stdout = self.useFixture(fixtures.StringStream('stdout')).stream self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) - if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or - os.environ.get('OS_STDERR_CAPTURE') == '1'): + if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES: stderr = self.useFixture(fixtures.StringStream('stderr')).stream self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) + + def _fake_logs(self): + if os.environ.get('OS_DEBUG') in _TRUE_VALUES: + level = logging.DEBUG + else: + level = logging.INFO + capture_logs = os.environ.get('OS_LOG_CAPTURE') in _TRUE_VALUES + if capture_logs: + self.useFixture( + fixtures.FakeLogger( + format=_LOG_FORMAT, + level=level, + nuke_handlers=capture_logs, + ) + ) + else: + logging.basicConfig(format=_LOG_FORMAT, level=level) diff --git a/storyboard/openstack/common/timeutils.py b/storyboard/openstack/common/timeutils.py index b79ebf37..52688a02 100644 --- a/storyboard/openstack/common/timeutils.py +++ b/storyboard/openstack/common/timeutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # @@ -79,6 +77,9 @@ def is_older_than(before, seconds): """Return True if before is older than seconds.""" if isinstance(before, six.string_types): before = parse_strtime(before).replace(tzinfo=None) + else: + before = before.replace(tzinfo=None) + return utcnow() - before > datetime.timedelta(seconds=seconds) @@ -86,6 +87,9 @@ def is_newer_than(after, seconds): """Return True if after is newer than seconds.""" if isinstance(after, six.string_types): after = parse_strtime(after).replace(tzinfo=None) + else: + after = after.replace(tzinfo=None) + return after - utcnow() > datetime.timedelta(seconds=seconds) @@ -110,7 +114,7 @@ def utcnow(): def iso8601_from_timestamp(timestamp): - """Returns a iso8601 formated date from timestamp.""" + """Returns a iso8601 formatted date from timestamp.""" return isotime(datetime.datetime.utcfromtimestamp(timestamp)) @@ -178,6 +182,15 @@ def delta_seconds(before, after): datetime objects (as a float, to microsecond resolution). """ delta = after - before + return total_seconds(delta) + + +def total_seconds(delta): + """Return the total seconds of datetime.timedelta object. + + Compute total seconds of datetime.timedelta, datetime.timedelta + doesn't have method total_seconds in Python2.6, calculate it manually. + """ try: return delta.total_seconds() except AttributeError: @@ -188,8 +201,8 @@ def delta_seconds(before, after): def is_soon(dt, window): """Determines if time is going to happen in the next window seconds. 
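A quick illustration of the new total_seconds() helper above; its manual fallback arithmetic matches timedelta.total_seconds() on Pythons that have it:

    import datetime

    from storyboard.openstack.common import timeutils

    delta = datetime.timedelta(minutes=1, microseconds=500)
    # 60.0005; also works on Python 2.6, where the timedelta
    # method does not exist.
    print(timeutils.total_seconds(delta))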
- :params dt: the time - :params window: minimum seconds to remain to consider the time not soon + :param dt: the time + :param window: minimum seconds to remain to consider the time not soon :return: True if expiration is within the given duration """ diff --git a/storyboard/tests/db/migration/test_migrations_base.py b/storyboard/tests/db/migration/test_migrations_base.py index fe4d61dd..bccd436e 100644 --- a/storyboard/tests/db/migration/test_migrations_base.py +++ b/storyboard/tests/db/migration/test_migrations_base.py @@ -44,6 +44,7 @@ from storyboard.tests import base LOG = logging.getLogger(__name__) CONF = cfg.CONF +cfg.set_defaults(lockutils.util_opts, lock_path='/tmp') synchronized = lockutils.synchronized_with_prefix('storyboard-') @@ -240,7 +241,7 @@ class BaseMigrationTestCase(base.TestCase): self.assertEqual('', err, "Failed to run: %s\n%s" % (cmd, output)) - @synchronized('pgadmin', external=True, lock_path='/tmp') + @synchronized('pgadmin', external=True) def _reset_pg(self, conn_pieces): (user, password, database, host) = \ get_pgsql_connection_info(conn_pieces) @@ -264,7 +265,7 @@ class BaseMigrationTestCase(base.TestCase): os.unsetenv('PGPASSWORD') os.unsetenv('PGUSER') - @synchronized('mysql', external=True, lock_path='/tmp') + @synchronized('mysql', external=True) def _reset_mysql(self, conn_pieces): # We can execute the MySQL client to destroy and re-create # the MYSQL database, which is easier and less error-prone @@ -278,7 +279,7 @@ class BaseMigrationTestCase(base.TestCase): 'host': host, 'sql': sql}) self.execute_cmd(cmd) - @synchronized('sqlite', external=True, lock_path='/tmp') + @synchronized('sqlite', external=True) def _reset_sqlite(self, conn_pieces): # We can just delete the SQLite database, which is # the easiest and cleanest solution
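Taken together with the lockutils changes earlier in this patch, the test updates above show the new configuration-driven pattern: the per-call lock_path argument is gone, and external locks read CONF.lock_path (seeded here via cfg.set_defaults, or via the STORYBOARD_LOCK_PATH environment variable). A minimal sketch of the same pattern outside the test suite, using the public API shown in this diff:

    from oslo.config import cfg

    from storyboard.openstack.common import lockutils

    # Default the option once, as the migration tests now do.
    cfg.set_defaults(lockutils.util_opts, lock_path='/tmp')

    @lockutils.synchronized('pgadmin', external=True)
    def reset_database():
        # Only one process at a time runs this across all workers;
        # the lock file is created under CONF.lock_path.
        pass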