Updated oslo

A number of things in oslo changed, including the DB API. This change
syncs those updates into storyboard.

Change-Id: Ibd579375ad0be5f7681fca60e44ca7f8c8a89c8a
Michael Krotscheck 2014-03-05 16:52:52 -08:00
parent 3b7d01c7b6
commit 93b851b719
36 changed files with 1520 additions and 541 deletions

View File

@ -1,7 +1,7 @@
pbr>=0.5.21,<1.0
pbr>=0.6,<1.0
alembic>=0.4.1
Babel>=0.9.6
Babel>=1.3
iso8601>=0.1.8
oauthlib>=0.6
oslo.config>=1.2.0
@ -9,6 +9,6 @@ pecan>=0.2.0
python-openid
PyYAML>=3.1.0
requests>=1.1
six>=1.4.1
six>=1.5.2
SQLAlchemy>=0.8,<=0.8.99
WSME>=0.5b6

View File

@ -20,15 +20,14 @@ from wsme import types as wtypes
from oslo.config import cfg
from sqlalchemy.exc import SADeprecationWarning
from storyboard.db.api import get_session
import storyboard.db.models as sqlalchemy_models
from storyboard.openstack.common.db.sqlalchemy import session as db_session
CONF = cfg.CONF
class _Base(wtypes.Base):
id = int
created_at = datetime
updated_at = datetime
@ -55,7 +54,7 @@ class _Base(wtypes.Base):
@classmethod
def create(cls, session=None, wsme_entry=None):
if not session:
session = db_session.get_session(sqlite_fk=True)
session = get_session()
with session.begin():
db_entry = convert_to_db_model(cls, wsme_entry, session)
session.add(db_entry)
@ -65,12 +64,12 @@ class _Base(wtypes.Base):
@classmethod
def update(cls, key_property_name="id", key_property_value=None,
wsme_entry=None):
db_entry = cls.from_db(**{key_property_name: key_property_value})\
db_entry = cls.from_db(**{key_property_name: key_property_value}) \
.first()
if not db_entry:
return None
session = db_session.get_session(sqlite_fk=True)
session = get_session()
with session.begin():
updated_db_model = update_db_model(cls, db_entry, wsme_entry)
session.add(updated_db_model)
@ -80,17 +79,17 @@ class _Base(wtypes.Base):
@classmethod
def add_item(cls, cont_key_name, cont_key_value, item_cls, item_key_name,
item_key_value, container_name):
session = db_session.get_session(sqlite_fk=True)
session = get_session()
with session.begin():
db_container_entry = cls.from_db(session=session,
**{cont_key_name: cont_key_value})\
db_container_entry = cls \
.from_db(session=session, **{cont_key_name: cont_key_value}) \
.first()
if not db_container_entry:
return None
db_add_item = item_cls.from_db(session=session,
**{item_key_name: item_key_value}).\
first()
db_add_item = item_cls \
.from_db(session=session, **{item_key_name: item_key_value}) \
.first()
if not db_add_item:
return None
@ -114,7 +113,7 @@ class _Base(wtypes.Base):
def from_db(cls, session=None, **kwargs):
model_cls = WSME_TO_SQLALCHEMY[cls]
if not session:
session = db_session.get_session(sqlite_fk=True)
session = get_session()
query = session.query(model_cls)
return query.filter_by(**kwargs)
@ -140,7 +139,7 @@ def convert_to_wsme(cls, entry):
if isinstance(attr._get_datatype(), wtypes.ArrayType):
value = [convert_to_wsme(SQLALCHEMY_TO_WSME[type(item)], item)
for item in value]
setattr(wsme_object, attr_name, value)
return wsme_object
@ -168,7 +167,7 @@ def convert_to_db_model(cls, entry, session):
value = [convert_to_db_model(attr._get_datatype().item_type,
item,
session)
for item in value]
setattr(model_object, attr_name, value)
return model_object
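
Taken together, these helpers give each WSME model a small CRUD surface over the new get_session(). A hedged sketch of the calling side; Story stands in for any _Base subclass registered in WSME_TO_SQLALCHEMY, and the entry variables are invented:

    story = Story.create(wsme_entry=incoming_story)   # INSERT in one transaction
    story = Story.update(key_property_name="id",
                         key_property_value=story.id,
                         wsme_entry=changed_story)    # UPDATE by key
    query = Story.from_db(id=story.id)                # plain SQLAlchemy query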

View File

@ -22,9 +22,48 @@ from storyboard.openstack.common.db.sqlalchemy import session as db_session
from storyboard.openstack.common import log
CONF = cfg.CONF
CONF.import_group("database", "storyboard.openstack.common.db.options")
LOG = log.getLogger(__name__)
_FACADE = None
get_session = db_session.get_session
def _get_facade_instance():
"""Generate an instance of the DB Facade.
"""
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade(
CONF.database.connection,
**dict(CONF.database.iteritems()))
return _FACADE
def _destroy_facade_instance():
"""Destroys the db facade instance currently in use.
"""
global _FACADE
_FACADE = None
def get_engine():
"""Returns the global instance of our database engine.
"""
facade = _get_facade_instance()
return facade.get_engine()
def get_session(autocommit=True, expire_on_commit=False):
"""Returns a database session from our facade.
"""
facade = _get_facade_instance()
return facade.get_session(autocommit=autocommit,
expire_on_commit=expire_on_commit)
def cleanup():
"""Manually clean up our database engine.
"""
_destroy_facade_instance()
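
The practical effect is that callers simply ask the facade for a session instead of threading sqlite_fk flags around. A minimal sketch of the consuming side (SomeModel is a placeholder for a mapped model):

    from storyboard.db.api import get_session

    session = get_session()          # lazily builds the global EngineFacade
    with session.begin():
        session.add(SomeModel())     # hypothetical mapped model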
def model_query(model, session=None):

View File

@ -17,8 +17,6 @@
SQLAlchemy Models for storing storyboard
"""
import warnings
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
from sqlalchemy.ext import declarative
@ -38,9 +36,6 @@ from sqlalchemy import UnicodeText
from storyboard.openstack.common.db.sqlalchemy import models
# Turn SQLAlchemy warnings into errors
warnings.simplefilter('error')
CONF = cfg.CONF

View File

@ -19,7 +19,7 @@ import yaml
from oslo.config import cfg
from sqlalchemy.exc import SADeprecationWarning
from storyboard.openstack.common.db.sqlalchemy import session as db_session
from storyboard.db import api as db_api
from storyboard.db.models import Project
from storyboard.db.models import ProjectGroup
@ -47,7 +47,7 @@ def do_load_models(filename):
project_groups[group_name].append({"name": project_name,
"description": project_description})
session = db_session.get_session(sqlite_fk=True)
session = db_api.get_session()
with session.begin():
for project_group_name, projects in six.iteritems(project_groups):

View File

@ -0,0 +1,2 @@
import six
six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
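
This shim registers mox3 as the six.moves stand-in for the Python-2-only mox, so test code can import it uniformly:

    from six.moves import mox  # resolves to mox3.mox via the add_move above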

View File

@ -0,0 +1,111 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple class that stores security context information in the web request.
Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""
import itertools
import uuid
def generate_request_id():
return 'req-%s' % str(uuid.uuid4())
class RequestContext(object):
"""Helper class to represent useful information about a request context.
Stores information about the security context under which the user
accesses the system, as well as additional request information.
"""
user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'
def __init__(self, auth_token=None, user=None, tenant=None, domain=None,
user_domain=None, project_domain=None, is_admin=False,
read_only=False, show_deleted=False, request_id=None,
instance_uuid=None):
self.auth_token = auth_token
self.user = user
self.tenant = tenant
self.domain = domain
self.user_domain = user_domain
self.project_domain = project_domain
self.is_admin = is_admin
self.read_only = read_only
self.show_deleted = show_deleted
self.instance_uuid = instance_uuid
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
def to_dict(self):
user_idt = (
self.user_idt_format.format(user=self.user or '-',
tenant=self.tenant or '-',
domain=self.domain or '-',
user_domain=self.user_domain or '-',
p_domain=self.project_domain or '-'))
return {'user': self.user,
'tenant': self.tenant,
'domain': self.domain,
'user_domain': self.user_domain,
'project_domain': self.project_domain,
'is_admin': self.is_admin,
'read_only': self.read_only,
'show_deleted': self.show_deleted,
'auth_token': self.auth_token,
'request_id': self.request_id,
'instance_uuid': self.instance_uuid,
'user_identity': user_idt}
def get_admin_context(show_deleted=False):
context = RequestContext(None,
tenant=None,
is_admin=True,
show_deleted=show_deleted)
return context
def get_context_from_function_and_args(function, args, kwargs):
"""Find an arg of type RequestContext and return it.
This is useful in a couple of decorators where we don't
know much about the function we're wrapping.
"""
for arg in itertools.chain(kwargs.values(), args):
if isinstance(arg, RequestContext):
return arg
return None
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
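
A brief illustration of the identity string this context produces (values invented):

    ctx = RequestContext(user='alice', tenant='demo')
    ctx.to_dict()['user_identity']   # -> 'alice demo - - -'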

View File

@ -15,43 +15,148 @@
"""Multiple DB API backend support.
Supported configuration options:
The following parameter is in the 'database' group:
`backend`: DB backend name or full module path to DB backend module.
A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.
"""
from oslo.config import cfg
import functools
import logging
import threading
import time
from storyboard.openstack.common.db import exception
from storyboard.openstack.common.gettextutils import _LE
from storyboard.openstack.common import importutils
db_opts = [
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The backend to use for db'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(db_opts, 'database')
def safe_for_db_retry(f):
"""Enable db-retry for decorated function, if config option enabled."""
f.__dict__['enable_retry'] = True
return f
class wrap_db_retry(object):
"""Retry db.api methods, if DBConnectionError() raised
Retry decorated db.api methods. If we enabled `use_db_reconnect`
in config, this decorator will be applied to all db.api functions,
marked with @safe_for_db_retry decorator.
Decorator catchs DBConnectionError() and retries function in a
loop until it succeeds, or until maximum retries count will be reached.
"""
def __init__(self, retry_interval, max_retries, inc_retry_interval,
max_retry_interval):
super(wrap_db_retry, self).__init__()
self.retry_interval = retry_interval
self.max_retries = max_retries
self.inc_retry_interval = inc_retry_interval
self.max_retry_interval = max_retry_interval
def __call__(self, f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
next_interval = self.retry_interval
remaining = self.max_retries
while True:
try:
return f(*args, **kwargs)
except exception.DBConnectionError as e:
if remaining == 0:
LOG.exception(_LE('DB exceeded retry limit.'))
raise exception.DBError(e)
if remaining != -1:
remaining -= 1
LOG.exception(_LE('DB connection error.'))
# NOTE(vsergeyev): We are using patched time module, so
# this effectively yields the execution
# context to another green thread.
time.sleep(next_interval)
if self.inc_retry_interval:
next_interval = min(
next_interval * 2,
self.max_retry_interval
)
return wrapper
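
With the defaults the DBAPI class below passes in (retry_interval=1, inc_retry_interval=True, max_retry_interval=10), a lost connection is retried after sleeps of 1, 2, 4, 8, 10, 10, ... seconds, until max_retries (20 by default) attempts have failed.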
class DBAPI(object):
def __init__(self, backend_mapping=None):
if backend_mapping is None:
backend_mapping = {}
backend_name = CONF.database.backend
# Import the untranslated name if we don't have a
# mapping.
backend_path = backend_mapping.get(backend_name, backend_name)
backend_mod = importutils.import_module(backend_path)
self.__backend = backend_mod.get_backend()
def __init__(self, backend_name, backend_mapping=None, lazy=False,
**kwargs):
"""Initialize the chosen DB API backend.
:param backend_name: name of the backend to load
:type backend_name: str
:param backend_mapping: backend name -> module/class to load mapping
:type backend_mapping: dict
:param lazy: load the DB backend lazily on the first DB API method call
:type lazy: bool
Keyword arguments:
:keyword use_db_reconnect: retry DB transactions on disconnect or not
:type use_db_reconnect: bool
:keyword retry_interval: seconds between transaction retries
:type retry_interval: int
:keyword inc_retry_interval: increase retry interval or not
:type inc_retry_interval: bool
:keyword max_retry_interval: max interval value between retries
:type max_retry_interval: int
:keyword max_retries: max number of retries before an error is raised
:type max_retries: int
"""
self._backend = None
self._backend_name = backend_name
self._backend_mapping = backend_mapping or {}
self._lock = threading.Lock()
if not lazy:
self._load_backend()
self.use_db_reconnect = kwargs.get('use_db_reconnect', False)
self.retry_interval = kwargs.get('retry_interval', 1)
self.inc_retry_interval = kwargs.get('inc_retry_interval', True)
self.max_retry_interval = kwargs.get('max_retry_interval', 10)
self.max_retries = kwargs.get('max_retries', 20)
def _load_backend(self):
with self._lock:
if not self._backend:
# Import the untranslated name if we don't have a mapping
backend_path = self._backend_mapping.get(self._backend_name,
self._backend_name)
backend_mod = importutils.import_module(backend_path)
self._backend = backend_mod.get_backend()
def __getattr__(self, key):
return getattr(self.__backend, key)
if not self._backend:
self._load_backend()
attr = getattr(self._backend, key)
if not hasattr(attr, '__call__'):
return attr
# NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
# DB API methods, decorated with @safe_for_db_retry
# on disconnect.
if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
attr = wrap_db_retry(
retry_interval=self.retry_interval,
max_retries=self.max_retries,
inc_retry_interval=self.inc_retry_interval,
max_retry_interval=self.max_retry_interval)(attr)
return attr
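
A hedged sketch of constructing the proxy; the backend module path and method name are illustrative, not storyboard's actual ones:

    dbapi = DBAPI('sqlalchemy',
                  backend_mapping={'sqlalchemy': 'myproject.db.sqlalchemy.api'},
                  lazy=True,
                  use_db_reconnect=True)
    # Attribute access loads the backend module on first use; callables
    # marked with @safe_for_db_retry are wrapped in wrap_db_retry.
    dbapi.some_backend_method()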

View File

@ -16,6 +16,8 @@
"""DB related custom exceptions."""
import six
from storyboard.openstack.common.gettextutils import _
@ -23,7 +25,7 @@ class DBError(Exception):
"""Wraps an implementation specific exception."""
def __init__(self, inner_exception=None):
self.inner_exception = inner_exception
super(DBError, self).__init__(str(inner_exception))
super(DBError, self).__init__(six.text_type(inner_exception))
class DBDuplicateEntry(DBError):
@ -46,7 +48,7 @@ class DBInvalidUnicodeParameter(Exception):
class DbMigrationError(DBError):
"""Wraps migration specific exception."""
def __init__(self, message=None):
super(DbMigrationError, self).__init__(str(message))
super(DbMigrationError, self).__init__(message)
class DBConnectionError(DBError):
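
For context on the six.text_type change above, a minimal Python 2 illustration (hedged):

    e = ValueError(u'\u00fc')   # exception with a non-ASCII message
    six.text_type(e)            # u'\xfc' -- safe
    str(e)                      # raises UnicodeEncodeError on Python 2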

View File

@ -0,0 +1,171 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo.config import cfg
database_opts = [
cfg.StrOpt('sqlite_db',
deprecated_group='DEFAULT',
default='storyboard.sqlite',
help='The file name to use with SQLite'),
cfg.BoolOpt('sqlite_synchronous',
deprecated_group='DEFAULT',
default=True,
help='If True, SQLite uses synchronous mode'),
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The backend to use for db'),
cfg.StrOpt('connection',
help='The SQLAlchemy connection string used to connect to the '
'database',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('mysql_sql_mode',
default='TRADITIONAL',
help='The SQL mode to be used for MySQL sessions. '
'This option, including the default, overrides any '
'server-set SQL mode. To use whatever SQL mode '
'is set by the server configuration, '
'set this to no value. Example: mysql_sql_mode='),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='Timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='Maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='Interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
cfg.BoolOpt('use_db_reconnect',
default=False,
help='Enable the experimental use of database reconnect '
'on connection lost'),
cfg.IntOpt('db_retry_interval',
default=1,
help='seconds between db connection retries'),
cfg.BoolOpt('db_inc_retry_interval',
default=True,
help='Whether to increase interval between db connection '
'retries, up to db_max_retry_interval'),
cfg.IntOpt('db_max_retry_interval',
default=10,
help='max seconds between db connection retries, if '
'db_inc_retry_interval is enabled'),
cfg.IntOpt('db_max_retries',
default=20,
help='maximum db connection retries before error is raised. '
'(setting -1 implies an infinite retry count)'),
]
CONF = cfg.CONF
CONF.register_opts(database_opts, 'database')
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def list_opts():
"""Returns a list of oslo.config options available in the library.
The returned list includes all oslo.config options which may be registered
at runtime by the library.
Each element of the list is a tuple. The first element is the name of the
group under which the list of elements in the second element will be
registered. A group name of None corresponds to the [DEFAULT] group in
config files.
The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users by this library.
:returns: a list of (group_name, opts) tuples
"""
return [('database', copy.deepcopy(database_opts))]
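
A short sketch of a consumer, e.g. a sample-config generator:

    for group, opts in list_opts():
        for opt in opts:
            print('[%s] %s: %s' % (group, opt.name, opt.help))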

View File

@ -51,13 +51,9 @@ import sqlalchemy
from sqlalchemy.schema import UniqueConstraint
from storyboard.openstack.common.db import exception
from storyboard.openstack.common.db.sqlalchemy import session as db_session
from storyboard.openstack.common.gettextutils import _
get_engine = db_session.get_engine
def _get_unique_constraints(self, table):
"""Retrieve information about existing unique constraints of the table
@ -172,11 +168,12 @@ def patch_migrate():
sqlite.SQLiteConstraintGenerator)
def db_sync(abs_path, version=None, init_version=0):
def db_sync(engine, abs_path, version=None, init_version=0):
"""Upgrade or downgrade a database.
Function runs the upgrade() or downgrade() functions in change scripts.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository.
:param version: Database will upgrade/downgrade until this version.
If None - database will update to the latest
@ -190,18 +187,23 @@ def db_sync(abs_path, version=None, init_version=0):
raise exception.DbMigrationError(
message=_("version should be an integer"))
current_version = db_version(abs_path, init_version)
current_version = db_version(engine, abs_path, init_version)
repository = _find_migrate_repo(abs_path)
_db_schema_sanity_check()
_db_schema_sanity_check(engine)
if version is None or version > current_version:
return versioning_api.upgrade(get_engine(), repository, version)
return versioning_api.upgrade(engine, repository, version)
else:
return versioning_api.downgrade(get_engine(), repository,
return versioning_api.downgrade(engine, repository,
version)
def _db_schema_sanity_check():
engine = get_engine()
def _db_schema_sanity_check(engine):
"""Ensure all database tables were created with required parameters.
:param engine: SQLAlchemy engine instance for a given database
"""
if engine.name == 'mysql':
onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
'from information_schema.TABLES '
@ -216,23 +218,23 @@ def _db_schema_sanity_check():
) % ','.join(table_names))
def db_version(abs_path, init_version):
def db_version(engine, abs_path, init_version):
"""Show the current version of the repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
try:
return versioning_api.db_version(get_engine(), repository)
return versioning_api.db_version(engine, repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
engine = get_engine()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0 or 'alembic_version' in tables:
db_version_control(abs_path, init_version)
return versioning_api.db_version(get_engine(), repository)
db_version_control(engine, abs_path, version=init_version)
return versioning_api.db_version(engine, repository)
else:
raise exception.DbMigrationError(
message=_(
@ -241,17 +243,18 @@ def db_version(abs_path, init_version):
"manually."))
def db_version_control(abs_path, version=None):
def db_version_control(engine, abs_path, version=None):
"""Mark a database as under this repository's version control.
Once a database is under version control, schema changes should
only be done via change scripts in this repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
versioning_api.version_control(get_engine(), repository, version)
versioning_api.version_control(engine, repository, version)
return version
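
With the engine now passed explicitly, a caller looks roughly like this (the repository path is a placeholder):

    engine = db_api.get_engine()
    db_sync(engine, '/path/to/migrate_repo')              # upgrade to latest
    current = db_version(engine, '/path/to/migrate_repo', 0)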

View File

@ -26,7 +26,6 @@ from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper
from storyboard.openstack.common.db.sqlalchemy import session as sa
from storyboard.openstack.common import timeutils
@ -34,10 +33,9 @@ class ModelBase(object):
"""Base class for models."""
__table_initialized__ = False
def save(self, session=None):
def save(self, session):
"""Save this object."""
if not session:
session = sa.get_session()
# NOTE(boris-42): This part of the code should look like:
# session.add(self)
# session.flush()
@ -110,7 +108,7 @@ class SoftDeleteMixin(object):
deleted_at = Column(DateTime)
deleted = Column(Integer, default=0)
def soft_delete(self, session=None):
def soft_delete(self, session):
"""Mark this object as deleted."""
self.deleted = self.id
self.deleted_at = timeutils.utcnow()
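
Dropping the implicit session means callers now always supply one; a brief sketch (SomeModel is a hypothetical ModelBase subclass):

    from storyboard.db.api import get_session

    session = get_session()
    obj = SomeModel()
    obj.save(session)   # session is now a required argument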

View File

@ -16,33 +16,24 @@
"""Session Handling for SQLAlchemy backend.
Initializing:
* Call set_defaults with the minimal of the following kwargs:
sql_connection, sqlite_db
Example::
session.set_defaults(
sql_connection="sqlite:///var/lib/storyboard/sqlite.db",
sqlite_db="/var/lib/storyboard/sqlite.db")
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
model_query() will implicitly use a session when called without one
* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
`model_query()` will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
Note: Automatic retry will be enabled in a future patch.
.. note:: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and reservation_rollback().
the code around quotas and `reservation_rollback()`.
Examples::
Examples:
.. code:: python
def get_foo(context, foo):
return (model_query(context, models.Foo).
@ -61,28 +52,29 @@ Recommended ways to use sessions within this framework:
return foo_ref
* Within the scope of a single method, keeping all the reads and writes within
the context managed by a single session. In this way, the session's __exit__
handler will take care of calling flush() and commit() for you.
If using this approach, you should not explicitly call flush() or commit().
Any error within the context of the session will cause the session to emit
a ROLLBACK. Database Errors like IntegrityError will be raised in
session's __exit__ handler, and any try/except within the context managed
by session will not be triggered. And catching other non-database errors in
the session will not trigger the ROLLBACK, so exception handlers should
always be outside the session, unless the developer wants to do a partial
commit on purpose. If the connection is dropped before this is possible,
the database will implicitly roll back the transaction.
* Within the scope of a single method, keep all the reads and writes within
the context managed by a single session. In this way, the session's
`__exit__` handler will take care of calling `flush()` and `commit()` for
you. If using this approach, you should not explicitly call `flush()` or
`commit()`. Any error within the context of the session will cause the
session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
raised in `session`'s `__exit__` handler, and any try/except within the
context managed by `session` will not be triggered. And catching other
non-database errors in the session will not trigger the ROLLBACK, so
exception handlers should always be outside the session, unless the
developer wants to do a partial commit on purpose. If the connection is
dropped before this is possible, the database will implicitly roll back the
transaction.
Note: statements in the session scope will not be automatically retried.
.. note:: Statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call model.save()
do not need to call `model.save()`:
::
.. code:: python
def create_many_foo(context, foos):
session = get_session()
session = sessionmaker()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
@ -90,7 +82,7 @@ Recommended ways to use sessions within this framework:
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = get_session()
session = sessionmaker()
with session.begin():
foo_ref = (model_query(context, models.Foo, session).
filter_by(id=foo_id).
@ -99,11 +91,16 @@ Recommended ways to use sessions within this framework:
filter_by(id=foo_ref['bar_id']).
update({'bar': newbar}))
Note: update_bar is a trivially simple example of using "with session.begin".
Whereas create_many_foo is a good example of when a transaction is needed,
it is always best to use as few queries as possible. The two queries in
update_bar can be better expressed using a single query which avoids
the need for an explicit transaction. It can be expressed like so::
.. note:: `update_bar` is a trivially simple example of using
``with session.begin``. Whereas `create_many_foo` is a good example of
when a transaction is needed, it is always best to use as few queries as
possible.
The two queries in `update_bar` can be better expressed using a single query
which avoids the need for an explicit transaction. It can be expressed like
so:
.. code:: python
def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id).
@ -114,21 +111,25 @@ Recommended ways to use sessions within this framework:
filter_by(id=subq.as_scalar()).
update({'bar': newbar}))
For reference, this emits approximately the following SQL statement::
For reference, this emits approximately the following SQL statement:
.. code:: sql
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
Note: create_duplicate_foo is a trivially simple example of catching an
exception while using "with session.begin". Here create two duplicate
instances with same primary key, must catch the exception out of context
managed by a single session:
.. note:: `create_duplicate_foo` is a trivially simple example of catching an
exception while using ``with session.begin``. Here we create two duplicate
instances with the same primary key; the exception must be caught outside
the context managed by a single session:
.. code:: python
def create_duplicate_foo(context):
foo1 = models.Foo()
foo2 = models.Foo()
foo1.id = foo2.id = 1
session = get_session()
session = sessionmaker()
try:
with session.begin():
session.add(foo1)
@ -138,7 +139,7 @@ Recommended ways to use sessions within this framework:
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call session.begin() on an existing
SQLAlchemy will throw an error when you call `session.begin()` on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
@ -151,10 +152,10 @@ Recommended ways to use sessions within this framework:
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
::
.. code:: python
def myfunc(foo):
session = get_session()
session = sessionmaker()
with session.begin():
# do some database things
bar = _private_func(foo, session)
@ -162,7 +163,7 @@ Recommended ways to use sessions within this framework:
def _private_func(foo, session=None):
if not session:
session = get_session()
session = sessionmaker()
with session.begin(subtransaction=True):
# do some other database things
return bar
@ -172,13 +173,13 @@ There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your "with session.begin()" block should be as short
This means that your ``with session.begin()`` block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid "with_lockmode('UPDATE')" when possible.
* Avoid ``with_lockmode('UPDATE')`` when possible.
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
@ -189,15 +190,18 @@ There are some things which it is best to avoid:
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
The better long-term solution is to use
``INSERT .. ON DUPLICATE KEY UPDATE``.
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
Enabling soft deletes:
* To use/enable soft-deletes, the SoftDeleteMixin must be added
to your model class. For example::
* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
to your model class. For example:
.. code:: python
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
@ -205,15 +209,16 @@ Enabling soft deletes:
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted::
* There are two possible ways to mark a record as deleted:
`model.soft_delete()` and `query.soft_delete()`.
model.soft_delete() and query.soft_delete().
The `model.soft_delete()` method works with a single already-fetched entry.
`query.soft_delete()` makes only one db request for all entries that
correspond to the query.
model.soft_delete() method works with single already fetched entry.
query.soft_delete() makes only one db request for all entries that correspond
to query.
* In almost all cases you should use `query.soft_delete()`. Some examples:
* In almost all cases you should use query.soft_delete(). Some examples::
.. code:: python
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
@ -222,7 +227,7 @@ Efficient use of soft deletes:
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = get_session()
session = sessionmaker()
with session.begin(subtransactions=True):
count = (model_query(BarModel).
find(some_condition).
@ -232,24 +237,26 @@ Efficient use of soft deletes:
if count == 0:
raise Exception("0 entries were soft deleted")
* There is only one situation where model.soft_delete() is appropriate: when
* There is only one situation where `model.soft_delete()` is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
::
.. code:: python
def soft_delete_bar_model():
session = get_session()
session = sessionmaker()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to query and
then soft delete them you should use query.soft_delete() method::
then soft delete them you should use the `query.soft_delete()` method:
.. code:: python
def soft_delete_multi_models():
session = get_session()
session = sessionmaker()
with session.begin():
query = (model_query(BarModel, session=session).
find(some_condition))
@ -260,22 +267,22 @@ Efficient use of soft deletes:
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using model.soft_delete(), as in the following
which issues a single query. Using `model.soft_delete()`, as in the following
example, is very inefficient.
::
.. code:: python
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import functools
import os.path
import logging
import re
import time
from oslo.config import cfg
import six
from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener
@ -284,151 +291,12 @@ from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from storyboard.openstack.common.db import exception
from storyboard.openstack.common.gettextutils import _
from storyboard.openstack.common import log as logging
from storyboard.openstack.common.gettextutils import _LE, _LW, _LI
from storyboard.openstack.common import timeutils
sqlite_db_opts = [
cfg.StrOpt('sqlite_db',
default='storyboard.sqlite',
help='The file name to use with SQLite'),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help='If True, SQLite uses synchronous mode'),
]
database_opts = [
cfg.StrOpt('connection',
default='sqlite:///' +
os.path.abspath(os.path.join(os.path.dirname(__file__),
'../', '$sqlite_db')),
help='The SQLAlchemy connection string used to connect to the '
'database',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('slave_connection',
default='',
secret=True,
help='The SQLAlchemy connection string used to connect to the '
'slave database'),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='Timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='Maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='Interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
]
CONF = cfg.CONF
CONF.register_opts(sqlite_db_opts)
CONF.register_opts(database_opts, 'database')
LOG = logging.getLogger(__name__)
_ENGINE = None
_MAKER = None
_SLAVE_ENGINE = None
_SLAVE_MAKER = None
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection)
cfg.set_defaults(sqlite_db_opts,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def cleanup():
global _ENGINE, _MAKER
global _SLAVE_ENGINE, _SLAVE_MAKER
if _MAKER:
_MAKER.close_all()
_MAKER = None
if _ENGINE:
_ENGINE.dispose()
_ENGINE = None
if _SLAVE_MAKER:
_SLAVE_MAKER.close_all()
_SLAVE_MAKER = None
if _SLAVE_ENGINE:
_SLAVE_ENGINE.dispose()
_SLAVE_ENGINE = None
class SqliteForeignKeysListener(PoolListener):
"""Ensures that the foreign key constraints are enforced in SQLite.
@ -441,30 +309,6 @@ class SqliteForeignKeysListener(PoolListener):
dbapi_con.execute('pragma foreign_keys=ON')
def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
slave_session=False, mysql_traditional_mode=False):
"""Return a SQLAlchemy session."""
global _MAKER
global _SLAVE_MAKER
maker = _MAKER
if slave_session:
maker = _SLAVE_MAKER
if maker is None:
engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session,
mysql_traditional_mode=mysql_traditional_mode)
maker = get_maker(engine, autocommit, expire_on_commit)
if slave_session:
_SLAVE_MAKER = maker
else:
_MAKER = maker
session = maker()
return session
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
@ -473,9 +317,9 @@ def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# sqlite since 3.7.16:
# 1 column - (IntegrityError) UNIQUE constraint failed: k1
# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
#
# N columns - (IntegrityError) UNIQUE constraint failed: k1, k2
# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
@ -488,11 +332,20 @@ def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
#
# ibm_db_sa:
# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
# statement, UPDATE statement, or foreign key update caused by a
# DELETE statement are not valid because the primary key, unique
# constraint or unique index identified by "2" constrains table
# "NOVA.KEY_PAIRS" from having duplicate values for the index
# key.
_DUP_KEY_RE_DB = {
"sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
"postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
"mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),)
"mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
"ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
}
@ -514,7 +367,7 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
return [columns]
return columns[len(uniqbase):].split("0")[1:]
if engine_name not in ["mysql", "sqlite", "postgresql"]:
if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
return
# FIXME(johannes): The usage of the .message attribute has been
@ -529,10 +382,15 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
else:
return
columns = match.group(1)
# NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
# columns so we have to omit that from the DBDuplicateEntry error.
columns = ''
if engine_name != 'ibm_db_sa':
columns = match.group(1)
if engine_name == "sqlite":
columns = columns.strip().split(", ")
columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
else:
columns = get_columns_from_uniq_cons_or_name(columns)
raise exception.DBDuplicateEntry(columns, integrity_error)
@ -570,57 +428,39 @@ def _raise_if_deadlock_error(operational_error, engine_name):
def _wrap_db_error(f):
# TODO(rpodolyaka): in a subsequent commit make this a class decorator to
# ensure it can only be applied to instances of Session subclasses (as we
# use the Session instance's bind attribute below)
@functools.wraps(f)
def _wrap(*args, **kwargs):
def _wrap(self, *args, **kwargs):
try:
return f(*args, **kwargs)
return f(self, *args, **kwargs)
except UnicodeEncodeError:
raise exception.DBInvalidUnicodeParameter()
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
except sqla_exc.OperationalError as e:
_raise_if_deadlock_error(e, get_engine().name)
_raise_if_db_connection_lost(e, self.bind)
_raise_if_deadlock_error(e, self.bind.dialect.name)
# NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now.
raise
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
except sqla_exc.IntegrityError as e:
# note(boris-42): SqlAlchemy doesn't unify errors from different
# DBs so we must do this. Also in some tables (for example
# instance_types) there are more than one unique constraint. This
# means we should get names of columns, which values violate
# unique constraint, from error message.
_raise_if_duplicate_entry_error(e, get_engine().name)
_raise_if_duplicate_entry_error(e, self.bind.dialect.name)
raise exception.DBError(e)
except Exception as e:
LOG.exception(_('DB exception wrapped.'))
LOG.exception(_LE('DB exception wrapped.'))
raise exception.DBError(e)
return _wrap
def get_engine(sqlite_fk=False, slave_engine=False,
mysql_traditional_mode=False):
"""Return a SQLAlchemy engine."""
global _ENGINE
global _SLAVE_ENGINE
engine = _ENGINE
db_uri = CONF.database.connection
if slave_engine:
engine = _SLAVE_ENGINE
db_uri = CONF.database.slave_connection
if engine is None:
engine = create_engine(db_uri, sqlite_fk=sqlite_fk,
mysql_traditional_mode=mysql_traditional_mode)
if slave_engine:
_SLAVE_ENGINE = engine
else:
_ENGINE = engine
return engine
def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")
@ -662,7 +502,7 @@ def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
cursor.execute(ping_sql)
except Exception as ex:
if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
msg = _('Database server has gone away: %s') % ex
msg = _LW('Database server has gone away: %s') % ex
LOG.warning(msg)
raise sqla_exc.DisconnectionError(msg)
else:
@ -677,7 +517,44 @@ def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy):
than a declared field just with warning. That is fraught with data
corruption.
"""
dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;")
_set_session_sql_mode(dbapi_con, connection_rec,
connection_proxy, 'TRADITIONAL')
def _set_session_sql_mode(dbapi_con, connection_rec,
connection_proxy, sql_mode=None):
"""Set the sql_mode session variable.
MySQL supports several server modes. The default is None, but sessions
may choose to enable server modes like TRADITIONAL, ANSI,
several STRICT_* modes and others.
Note: passing in '' (empty string) for sql_mode clears
the SQL mode for the session, overriding a potentially set
server default. Passing in None (the default) makes this
a no-op, meaning if a server-side SQL mode is set, it still applies.
"""
cursor = dbapi_con.cursor()
if sql_mode is not None:
cursor.execute("SET SESSION sql_mode = %s", [sql_mode])
# Check against the real effective SQL mode. Even when unset by
# our own config, the server may still be operating in a specific
# SQL mode as set by the server configuration
cursor.execute("SHOW VARIABLES LIKE 'sql_mode'")
row = cursor.fetchone()
if row is None:
LOG.warning(_LW('Unable to detect effective SQL mode'))
return
realmode = row[1]
LOG.info(_LI('MySQL server mode set to %s') % realmode)
# 'TRADITIONAL' mode enables several other modes, so
# we need a substring match here
if not ('TRADITIONAL' in realmode.upper() or
'STRICT_ALL_TABLES' in realmode.upper()):
LOG.warning(_LW("MySQL SQL mode is '%s', "
"consider enabling TRADITIONAL or STRICT_ALL_TABLES")
% realmode)
def _is_db_connection_error(args):
@ -685,73 +562,86 @@ def _is_db_connection_error(args):
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
# For the db2, the error code is -30081 since the db2 is still not ready
conn_err_codes = ('2002', '2003', '2006', '-30081')
conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
def create_engine(sql_connection, sqlite_fk=False,
mysql_traditional_mode=False):
def _raise_if_db_connection_lost(error, engine):
# NOTE(vsergeyev): Function is_disconnect(e, connection, cursor)
# requires connection and cursor in incoming parameters,
# but we have no possibility to create connection if DB
# is not available, so in such case reconnect fails.
# But is_disconnect() ignores these parameters, so it
# makes sense to pass to function None as placeholder
# instead of connection and cursor.
if engine.dialect.is_disconnect(error, None, None):
raise exception.DBConnectionError(error)
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
mysql_traditional_mode=False, idle_timeout=3600,
connection_debug=0, max_pool_size=None, max_overflow=None,
pool_timeout=None, sqlite_synchronous=True,
connection_trace=False, max_retries=10, retry_interval=10):
"""Return a new SQLAlchemy engine."""
# NOTE(geekinutah): At this point we could be connecting to the normal
# db handle or the slave db handle. Things like
# _wrap_db_error aren't going to work well if their
# backends don't match. Let's check.
_assert_matching_drivers()
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
"pool_recycle": CONF.database.idle_timeout,
"echo": False,
"pool_recycle": idle_timeout,
'convert_unicode': True,
}
# Map our SQL debug level to SQLAlchemy's options
if CONF.database.connection_debug >= 100:
engine_args['echo'] = 'debug'
elif CONF.database.connection_debug >= 50:
engine_args['echo'] = True
logger = logging.getLogger('sqlalchemy.engine')
# Map SQL debug level to Python log level
if connection_debug >= 100:
logger.setLevel(logging.DEBUG)
elif connection_debug >= 50:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)
if "sqlite" in connection_dict.drivername:
if sqlite_fk:
engine_args["listeners"] = [SqliteForeignKeysListener()]
engine_args["poolclass"] = NullPool
if CONF.database.connection == "sqlite://":
if sql_connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
else:
if CONF.database.max_pool_size is not None:
engine_args['pool_size'] = CONF.database.max_pool_size
if CONF.database.max_overflow is not None:
engine_args['max_overflow'] = CONF.database.max_overflow
if CONF.database.pool_timeout is not None:
engine_args['pool_timeout'] = CONF.database.pool_timeout
if max_pool_size is not None:
engine_args['pool_size'] = max_pool_size
if max_overflow is not None:
engine_args['max_overflow'] = max_overflow
if pool_timeout is not None:
engine_args['pool_timeout'] = pool_timeout
engine = sqlalchemy.create_engine(sql_connection, **engine_args)
sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
if engine.name in ['mysql', 'ibm_db_sa']:
callback = functools.partial(_ping_listener, engine)
sqlalchemy.event.listen(engine, 'checkout', callback)
if mysql_traditional_mode:
sqlalchemy.event.listen(engine, 'checkout', _set_mode_traditional)
else:
LOG.warning(_("This application has not enabled MySQL traditional"
" mode, which means silent data corruption may"
" occur. Please encourage the application"
" developers to enable this mode."))
ping_callback = functools.partial(_ping_listener, engine)
sqlalchemy.event.listen(engine, 'checkout', ping_callback)
if engine.name == 'mysql':
if mysql_traditional_mode:
mysql_sql_mode = 'TRADITIONAL'
if mysql_sql_mode:
mode_callback = functools.partial(_set_session_sql_mode,
sql_mode=mysql_sql_mode)
sqlalchemy.event.listen(engine, 'checkout', mode_callback)
elif 'sqlite' in connection_dict.drivername:
if not CONF.sqlite_synchronous:
if not sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
_synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
if (CONF.database.connection_trace and
engine.dialect.dbapi.__name__ == 'MySQLdb'):
if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
_patch_mysqldb_with_stacktrace_comments()
try:
@ -760,15 +650,15 @@ def create_engine(sql_connection, sqlite_fk=False,
if not _is_db_connection_error(e.args[0]):
raise
remaining = CONF.database.max_retries
remaining = max_retries
if remaining == -1:
remaining = 'infinite'
while True:
msg = _('SQL connection failed. %s attempts left.')
msg = _LW('SQL connection failed. %s attempts left.')
LOG.warning(msg % remaining)
if remaining != 'infinite':
remaining -= 1
time.sleep(CONF.database.retry_interval)
time.sleep(retry_interval)
try:
engine.connect()
break
@ -855,13 +745,149 @@ def _patch_mysqldb_with_stacktrace_comments():
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
def _assert_matching_drivers():
"""Make sure slave handle and normal handle have the same driver."""
# NOTE(geekinutah): There's no use case for writing to one backend and
# reading from another. Who knows what the future holds?
if CONF.database.slave_connection == '':
return
normal = sqlalchemy.engine.url.make_url(CONF.database.connection)
slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection)
assert normal.drivername == slave.drivername
class EngineFacade(object):
"""A helper class for removing global engine instances from storyboard.db.
As a library, storyboard.db can't decide where to store/when to create engine
and sessionmaker instances, so this must be left for a target application.
On the other hand, in order to simplify the adoption of storyboard.db changes,
we'll provide a helper class, which creates engine and sessionmaker
on its instantiation and provides get_engine()/get_session() methods
that are compatible with corresponding utility functions that currently
exist in target projects, e.g. in Nova.
engine/sessionmaker instances will still be global (and they are meant to
be global), but they will be stored in the app context, rather than in the
storyboard.db context.
Note: use of this helper is completely optional and you are encouraged to
integrate engine/sessionmaker instances into your apps any way you like
(e.g. one might want to bind a session to a request context). Two important
things to remember:
1. An Engine instance is effectively a pool of DB connections, so it's
meant to be shared (and it's thread-safe).
2. A Session instance is not meant to be shared and represents a DB
transactional context (i.e. it's not thread-safe). sessionmaker is
a factory of sessions.
"""
def __init__(self, sql_connection,
sqlite_fk=False, mysql_sql_mode=None,
autocommit=True, expire_on_commit=False, **kwargs):
"""Initialize engine and sessionmaker instances.
:param sqlite_fk: enable foreign keys in SQLite
:type sqlite_fk: bool
:param mysql_sql_mode: set SQL mode in MySQL
:type mysql_sql_mode: string
:param autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:param expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
Keyword arguments:
:keyword idle_timeout: timeout before idle sql connections are reaped
(defaults to 3600)
:keyword connection_debug: verbosity of SQL debugging information.
0=None, 100=Everything (defaults to 0)
:keyword max_pool_size: maximum number of SQL connections to keep open
in a pool (defaults to SQLAlchemy settings)
:keyword max_overflow: if set, use this value for max_overflow with
sqlalchemy (defaults to SQLAlchemy settings)
:keyword pool_timeout: if set, use this value for pool_timeout with
sqlalchemy (defaults to SQLAlchemy settings)
:keyword sqlite_synchronous: if True, SQLite uses synchronous mode
(defaults to True)
:keyword connection_trace: add python stack traces to SQL as comment
strings (defaults to False)
:keyword max_retries: maximum db connection retries during startup.
(setting -1 implies an infinite retry count)
(defaults to 10)
:keyword retry_interval: interval between retries of opening a sql
connection (defaults to 10)
"""
super(EngineFacade, self).__init__()
self._engine = create_engine(
sql_connection=sql_connection,
sqlite_fk=sqlite_fk,
mysql_sql_mode=mysql_sql_mode,
idle_timeout=kwargs.get('idle_timeout', 3600),
connection_debug=kwargs.get('connection_debug', 0),
max_pool_size=kwargs.get('max_pool_size'),
max_overflow=kwargs.get('max_overflow'),
pool_timeout=kwargs.get('pool_timeout'),
sqlite_synchronous=kwargs.get('sqlite_synchronous', True),
connection_trace=kwargs.get('connection_trace', False),
max_retries=kwargs.get('max_retries', 10),
retry_interval=kwargs.get('retry_interval', 10))
self._session_maker = get_maker(
engine=self._engine,
autocommit=autocommit,
expire_on_commit=expire_on_commit)
def get_engine(self):
"""Get the engine instance (note, that it's shared)."""
return self._engine
def get_session(self, **kwargs):
"""Get a Session instance.
If passed, keyword arguments values override the ones used when the
sessionmaker instance was created.
:keyword autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:keyword expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
"""
        # NOTE: iterate over a copy of the keys, since we delete from
        # kwargs while iterating.
        for arg in list(kwargs):
            if arg not in ('autocommit', 'expire_on_commit'):
                del kwargs[arg]
return self._session_maker(**kwargs)
@classmethod
def from_config(cls, connection_string, conf,
sqlite_fk=False, mysql_sql_mode=None,
autocommit=True, expire_on_commit=False):
"""Initialize EngineFacade using oslo.config config instance options.
:param connection_string: SQLAlchemy connection string
:type connection_string: string
:param conf: oslo.config config instance
:type conf: oslo.config.cfg.ConfigOpts
:param sqlite_fk: enable foreign keys in SQLite
:type sqlite_fk: bool
:param mysql_sql_mode: set SQL mode in MySQL
:type mysql_sql_mode: string
:param autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:param expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
"""
return cls(sql_connection=connection_string,
sqlite_fk=sqlite_fk,
mysql_sql_mode=mysql_sql_mode,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
**dict(conf.database.items()))
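
For illustration, a minimal sketch of how a target application might adopt the facade, assuming a `CONF` object with the standard `[database]` options registered; `_FACADE` and `_get_facade` are hypothetical application-side names (the same pattern storyboard.db.api uses):

    _FACADE = None

    def _get_facade():
        # Create the facade once, on first use, and keep it in the
        # application's own module scope rather than in storyboard.db.
        global _FACADE
        if _FACADE is None:
            _FACADE = EngineFacade.from_config(
                CONF.database.connection, CONF)
        return _FACADE

    def get_session(**kwargs):
        # Sessions are per-unit-of-work and not thread-safe;
        # hand out a fresh one each time.
        return _get_facade().get_session(**kwargs)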


@ -0,0 +1,149 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import functools
import os
import fixtures
import six
from storyboard.openstack.common.db.sqlalchemy import session
from storyboard.openstack.common.db.sqlalchemy import utils
from storyboard.openstack.common import test
class DbFixture(fixtures.Fixture):
"""Basic database fixture.
    Allows tests to run against various db backends, such as SQLite, MySQL
    and PostgreSQL. The sqlite backend is used by default. To override the
    default backend uri, set the env variable OS_TEST_DBAPI_CONNECTION with
    database admin credentials for the specific backend.
"""
def _get_uri(self):
return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://')
def __init__(self, test):
super(DbFixture, self).__init__()
self.test = test
def setUp(self):
super(DbFixture, self).setUp()
self.test.engine = session.create_engine(self._get_uri())
self.test.sessionmaker = session.get_maker(self.test.engine)
self.addCleanup(self.test.engine.dispose)
class DbTestCase(test.BaseTestCase):
"""Base class for testing of DB code.
Using `DbFixture`. Intended to be the main database test case to use all
the tests on a given backend with user defined uri. Backend specific
tests should be decorated with `backend_specific` decorator.
"""
FIXTURE = DbFixture
def setUp(self):
super(DbTestCase, self).setUp()
self.useFixture(self.FIXTURE(self))
ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']
def backend_specific(*dialects):
"""Decorator to skip backend specific tests on inappropriate engines.
::dialects: list of dialects names under which the test will be launched.
"""
def wrap(f):
@functools.wraps(f)
def ins_wrap(self):
if not set(dialects).issubset(ALLOWED_DIALECTS):
raise ValueError(
"Please use allowed dialects: %s" % ALLOWED_DIALECTS)
if self.engine.name not in dialects:
msg = ('The test "%s" can be run '
'only on %s. Current engine is %s.')
args = (f.__name__, ' '.join(dialects), self.engine.name)
self.skip(msg % args)
else:
return f(self)
return ins_wrap
return wrap
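
A hypothetical test illustrating the decorator; it is skipped automatically unless the fixture connected the test to a MySQL engine (the test class and method names are made up):

    class MySQLOnlyTest(DbTestCase):

        @backend_specific('mysql')
        def test_runs_on_mysql_only(self):
            # Only reached when self.engine is a MySQL engine.
            self.assertEqual('mysql', self.engine.name)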
@six.add_metaclass(abc.ABCMeta)
class OpportunisticFixture(DbFixture):
"""Base fixture to use default CI databases.
    The databases exist in the OpenStack CI infrastructure, but for
    correct functioning in a local environment they must be created
    manually.
"""
DRIVER = abc.abstractproperty(lambda: None)
DBNAME = PASSWORD = USERNAME = 'openstack_citest'
def _get_uri(self):
return utils.get_connect_string(backend=self.DRIVER,
user=self.USERNAME,
passwd=self.PASSWORD,
database=self.DBNAME)
@six.add_metaclass(abc.ABCMeta)
class OpportunisticTestCase(DbTestCase):
"""Base test case to use default CI databases.
    Subclasses of this test case run only when the openstack_citest
    database is available; otherwise the tests will be skipped.
"""
FIXTURE = abc.abstractproperty(lambda: None)
def setUp(self):
credentials = {
'backend': self.FIXTURE.DRIVER,
'user': self.FIXTURE.USERNAME,
'passwd': self.FIXTURE.PASSWORD,
'database': self.FIXTURE.DBNAME}
if self.FIXTURE.DRIVER and not utils.is_backend_avail(**credentials):
msg = '%s backend is not available.' % self.FIXTURE.DRIVER
return self.skip(msg)
super(OpportunisticTestCase, self).setUp()
class MySQLOpportunisticFixture(OpportunisticFixture):
DRIVER = 'mysql'
class PostgreSQLOpportunisticFixture(OpportunisticFixture):
DRIVER = 'postgresql'
class MySQLOpportunisticTestCase(OpportunisticTestCase):
FIXTURE = MySQLOpportunisticFixture
class PostgreSQLOpportunisticTestCase(OpportunisticTestCase):
FIXTURE = PostgreSQLOpportunisticFixture
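
A sketch of a hypothetical opportunistic test: it runs only when a MySQL server with the openstack_citest credentials is reachable and is skipped otherwise (class and method names are illustrative):

    class TestMySQLConnect(MySQLOpportunisticTestCase):

        def test_connect(self):
            # setUp() has already skipped the test if the backend
            # is unavailable, so the engine here is real.
            self.assertEqual('mysql', self.engine.name)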


@ -15,18 +15,18 @@
# under the License.
import functools
import logging
import os
import subprocess
import lockfile
from six import moves
from six.moves.urllib import parse
import sqlalchemy
import sqlalchemy.exc
from storyboard.openstack.common.db.sqlalchemy import utils
from storyboard.openstack.common.gettextutils import _
from storyboard.openstack.common import log as logging
from storyboard.openstack.common.py3kcompat import urlutils
from storyboard.openstack.common.gettextutils import _LE
from storyboard.openstack.common import test
LOG = logging.getLogger(__name__)
@ -35,14 +35,20 @@ LOG = logging.getLogger(__name__)
def _have_mysql(user, passwd, database):
present = os.environ.get('TEST_MYSQL_PRESENT')
if present is None:
return utils.is_backend_avail('mysql', user, passwd, database)
return utils.is_backend_avail(backend='mysql',
user=user,
passwd=passwd,
database=database)
return present.lower() in ('', 'true')
def _have_postgresql(user, passwd, database):
present = os.environ.get('TEST_POSTGRESQL_PRESENT')
if present is None:
return utils.is_backend_avail('postgres', user, passwd, database)
return utils.is_backend_avail(backend='postgres',
user=user,
passwd=passwd,
database=database)
return present.lower() in ('', 'true')
@ -54,10 +60,10 @@ def _set_db_lock(lock_path=None, lock_prefix=None):
path = lock_path or os.environ.get("STORYBOARD_LOCK_PATH")
lock = lockfile.FileLock(os.path.join(path, lock_prefix))
with lock:
LOG.debug(_('Got lock "%s"') % f.__name__)
LOG.debug('Got lock "%s"' % f.__name__)
return f(*args, **kwargs)
finally:
LOG.debug(_('Lock released "%s"') % f.__name__)
LOG.debug('Lock released "%s"' % f.__name__)
return wrapper
return decorator
@ -147,7 +153,7 @@ class BaseMigrationTestCase(test.BaseTestCase):
def _reset_databases(self):
for key, engine in self.engines.items():
conn_string = self.test_databases[key]
conn_pieces = urlutils.urlparse(conn_string)
conn_pieces = parse.urlparse(conn_string)
engine.dispose()
if conn_string.startswith('sqlite'):
# We can just delete the SQLite database, which is
@ -258,6 +264,6 @@ class WalkVersionsMixin(object):
if check:
check(engine, data)
except Exception:
LOG.error("Failed to migrate to version %s on engine %s" %
LOG.error(_LE("Failed to migrate to version %s on engine %s") %
(version, engine))
raise


@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
from migrate.changeset import UniqueConstraint
@ -29,6 +30,7 @@ from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
@ -36,9 +38,9 @@ from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from storyboard.openstack.common.gettextutils import _
from storyboard.openstack.common import log as logging
from storyboard.openstack.common import context as request_context
from storyboard.openstack.common.db.sqlalchemy import models
from storyboard.openstack.common.gettextutils import _, _LI, _LW
from storyboard.openstack.common import timeutils
@ -94,7 +96,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id
LOG.warning(_('Id not in sort_keys; is sort_keys unique?'))
LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
@ -157,6 +159,94 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
return query
def _read_deleted_filter(query, db_model, read_deleted):
if 'deleted' not in db_model.__table__.columns:
raise ValueError(_("There is no `deleted` column in `%s` table. "
"Project doesn't use soft-deleted feature.")
% db_model.__name__)
default_deleted_value = db_model.__table__.c.deleted.default.arg
if read_deleted == 'no':
query = query.filter(db_model.deleted == default_deleted_value)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter(db_model.deleted != default_deleted_value)
else:
raise ValueError(_("Unrecognized read_deleted value '%s'")
% read_deleted)
return query
def _project_filter(query, db_model, context, project_only):
if project_only and 'project_id' not in db_model.__table__.columns:
raise ValueError(_("There is no `project_id` column in `%s` table.")
% db_model.__name__)
if request_context.is_user_context(context) and project_only:
if project_only == 'allow_none':
is_none = None
query = query.filter(or_(db_model.project_id == context.project_id,
db_model.project_id == is_none))
else:
query = query.filter(db_model.project_id == context.project_id)
return query
def model_query(context, model, session, args=None, project_only=False,
read_deleted=None):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param model: Model to query. Must be a subclass of ModelBase.
:type model: models.ModelBase
:param session: The session to use.
:type session: sqlalchemy.orm.session.Session
:param args: Arguments to query. If None - model is used.
:type args: tuple
:param project_only: If present and context is user-type, then restrict
query to match the context's project_id. If set to
'allow_none', restriction includes project_id = None.
:type project_only: bool
:param read_deleted: If present, overrides context's read_deleted field.
:type read_deleted: bool
Usage:
result = (utils.model_query(context, models.Instance, session=session)
.filter_by(uuid=instance_uuid)
.all())
query = utils.model_query(
context, Node,
session=session,
args=(func.count(Node.id), func.sum(Node.ram))
).filter_by(project_id=project_id)
"""
if not read_deleted:
if hasattr(context, 'read_deleted'):
# NOTE(viktors): some projects use `read_deleted` attribute in
# their contexts instead of `show_deleted`.
read_deleted = context.read_deleted
else:
read_deleted = context.show_deleted
if not issubclass(model, models.ModelBase):
raise TypeError(_("model should be a subclass of ModelBase"))
query = session.query(model) if not args else session.query(*args)
query = _read_deleted_filter(query, model, read_deleted)
query = _project_filter(query, model, context, project_only)
return query
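
A hedged usage sketch beyond the docstring's examples, assuming a soft-deletable `models.Instance` model: overriding the context's read_deleted field to fetch only soft-deleted rows:

    deleted = (model_query(context, models.Instance, session=session,
                           read_deleted='only')
               .all())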
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
@ -277,8 +367,8 @@ def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
rows_to_delete_select = select([table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete:
delete_statement = table.update().\
@ -499,27 +589,29 @@ def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
execute()
def get_connect_string(backend, user, passwd, database):
def get_connect_string(backend, database, user=None, passwd=None):
"""Get database connection
Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
if backend == "postgres":
backend = "postgresql+psycopg2"
elif backend == "mysql":
backend = "mysql+mysqldb"
args = {'backend': backend,
'user': user,
'passwd': passwd,
'database': database}
if backend == 'sqlite':
template = '%(backend)s:///%(database)s'
else:
raise Exception("Unrecognized backend: '%s'" % backend)
return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
% {'backend': backend, 'user': user, 'passwd': passwd,
'database': database})
template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
return template % args
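
Illustrative outputs of the new signature (the credentials below are made-up examples):

    get_connect_string(backend='sqlite', database='test.db')
    # -> 'sqlite:///test.db'
    get_connect_string(backend='postgres', database='openstack_citest',
                       user='openstack_citest', passwd='openstack_citest')
    # -> 'postgresql+psycopg2://openstack_citest:openstack_citest'
    #    '@localhost/openstack_citest'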
def is_backend_avail(backend, user, passwd, database):
def is_backend_avail(backend, database, user=None, passwd=None):
try:
connect_uri = get_connect_string(backend, user, passwd, database)
connect_uri = get_connect_string(backend=backend,
database=database,
user=user,
passwd=passwd)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:


@ -24,7 +24,7 @@ import traceback
import six
from storyboard.openstack.common.gettextutils import _
from storyboard.openstack.common.gettextutils import _LE
class save_and_reraise_exception(object):
@ -59,7 +59,7 @@ class save_and_reraise_exception(object):
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
logging.error(_('Original exception being dropped: %s'),
logging.error(_LE('Original exception being dropped: %s'),
traceback.format_exception(self.type_,
self.value,
self.tb))
@ -88,8 +88,8 @@ def forever_retry_uncaught_exceptions(infunc):
if (cur_time - last_log_time > 60 or
this_exc_message != last_exc_message):
logging.exception(
_('Unexpected exception occurred %d time(s)... '
'retrying.') % exc_count)
_LE('Unexpected exception occurred %d time(s)... '
'retrying.') % exc_count)
last_log_time = cur_time
last_exc_message = this_exc_message
exc_count = 0


@ -19,7 +19,6 @@ import os
import tempfile
from storyboard.openstack.common import excutils
from storyboard.openstack.common.gettextutils import _
from storyboard.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@ -59,7 +58,7 @@ def read_cached_file(filename, force_reload=False):
cache_info = _FILE_CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug(_("Reloading cached file %s") % filename)
LOG.debug("Reloading cached file %s" % filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime


@ -21,16 +21,10 @@ import six
class Config(fixtures.Fixture):
"""Override some configuration values.
"""Allows overriding configuration settings for the test.
The keyword arguments are the names of configuration options to
override and their values.
`conf` will be reset on cleanup.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the reset() method, which is registered by addCleanup().
"""
def __init__(self, conf=cfg.CONF):
@ -38,9 +32,54 @@ class Config(fixtures.Fixture):
def setUp(self):
super(Config, self).setUp()
# NOTE(morganfainberg): unregister must be added to cleanup before
# reset is because cleanup works in reverse order of registered items,
# and a reset must occur before unregistering options can occur.
self.addCleanup(self._unregister_config_opts)
self.addCleanup(self.conf.reset)
self._registered_config_opts = {}
def config(self, **kw):
"""Override configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a `group` argument is supplied, the overrides are applied to
the specified configuration option group, otherwise the overrides
are applied to the ``default`` group.
"""
group = kw.pop('group', None)
for k, v in six.iteritems(kw):
self.conf.set_override(k, v, group)
def _unregister_config_opts(self):
for group in self._registered_config_opts:
self.conf.unregister_opts(self._registered_config_opts[group],
group=group)
def register_opt(self, opt, group=None):
"""Register a single option for the test run.
Options registered in this manner will automatically be unregistered
during cleanup.
If a `group` argument is supplied, it will register the new option
to that group, otherwise the option is registered to the ``default``
group.
"""
self.conf.register_opt(opt, group=group)
self._registered_config_opts.setdefault(group, set()).add(opt)
def register_opts(self, opts, group=None):
"""Register multiple options for the test run.
This works in the same manner as register_opt() but takes a list of
options as the first argument. All arguments will be registered to the
same group if the ``group`` argument is supplied, otherwise all options
will be registered to the ``default`` group.
"""
for opt in opts:
self.register_opt(opt, group=group)
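
A sketch of using the fixture inside a test, assuming oslo.config's cfg is imported and a hypothetical `scheme` option; both the override and the registration are undone automatically on cleanup:

    fixture = self.useFixture(Config())
    fixture.register_opt(cfg.StrOpt('scheme', default='https'))
    fixture.config(scheme='http')
    # The override is visible for the rest of the test.
    self.assertEqual('http', cfg.CONF.scheme)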


@ -48,4 +48,4 @@ class LockFixture(fixtures.Fixture):
def setUp(self):
super(LockFixture, self).setUp()
self.addCleanup(self.mgr.__exit__, None, None, None)
self.mgr.__enter__()
self.lock = self.mgr.__enter__()


@ -0,0 +1,34 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
def get_logging_handle_error_fixture():
"""returns a fixture to make logging raise formatting exceptions.
Usage:
self.useFixture(logging.get_logging_handle_error_fixture())
"""
return fixtures.MonkeyPatch('logging.Handler.handleError',
_handleError)
def _handleError(self, record):
"""Monkey patch for logging.Handler.handleError.
The default handleError just logs the error to stderr but we want
the option of actually raising an exception.
"""
raise


@ -15,6 +15,17 @@
# License for the specific language governing permissions and limitations
# under the License.
##############################################################################
##############################################################################
##
## DO NOT MODIFY THIS FILE
##
## This file is being graduated to the storyboardtest library. Please make all
## changes there, and only backport critical fixes here. - dhellmann
##
##############################################################################
##############################################################################
import fixtures
import mock


@ -15,8 +15,19 @@
# License for the specific language governing permissions and limitations
# under the License.
##############################################################################
##############################################################################
##
## DO NOT MODIFY THIS FILE
##
## This file is being graduated to the storyboardtest library. Please make all
## changes there, and only backport critical fixes here. - dhellmann
##
##############################################################################
##############################################################################
import fixtures
import mox
from six.moves import mox
class MoxStubout(fixtures.Fixture):


@ -23,6 +23,7 @@ Usual usage in an openstack.common module:
"""
import copy
import functools
import gettext
import locale
from logging import handlers
@ -35,6 +36,17 @@ import six
_localedir = os.environ.get('storyboard'.upper() + '_LOCALEDIR')
_t = gettext.translation('storyboard', localedir=_localedir, fallback=True)
# We use separate translation catalogs for each log level, so set up a
# mapping between the log level name and the translator. The domain
# for the log level is project_name + "-log-" + log_level so messages
# for each level end up in their own catalog.
_t_log_levels = dict(
(level, gettext.translation('storyboard' + '-log-' + level,
localedir=_localedir,
fallback=True))
for level in ['info', 'warning', 'error', 'critical']
)
_AVAILABLE_LANGUAGES = {}
USE_LAZY = False
@ -60,6 +72,28 @@ def _(msg):
return _t.ugettext(msg)
def _log_translation(msg, level):
"""Build a single translation of a log message
"""
if USE_LAZY:
return Message(msg, domain='storyboard' + '-log-' + level)
else:
translator = _t_log_levels[level]
if six.PY3:
return translator.gettext(msg)
return translator.ugettext(msg)
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = functools.partial(_log_translation, level='info')
_LW = functools.partial(_log_translation, level='warning')
_LE = functools.partial(_log_translation, level='error')
_LC = functools.partial(_log_translation, level='critical')
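
These markers are used exactly like `_()`; for instance (calls adapted from elsewhere in this change, with `basedir` and `err` as placeholder variables):

    LOG.info(_LI('Created lock path: %s'), basedir)
    LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
    LOG.error(_LE('Original exception being dropped: %s'), err)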
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.


@ -58,6 +58,13 @@ def import_module(import_str):
return sys.modules[import_str]
def import_versioned_module(version, submodule=None):
module = 'storyboard.v%s' % version
if submodule:
module = '.'.join((module, submodule))
return import_module(module)
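
An illustrative call, assuming a `storyboard.v1.client` module exists on the import path:

    # Resolves and imports the module 'storyboard.v1.client'.
    client = import_versioned_module(1, submodule='client')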
def try_import(import_str, default=None):
"""Try to import a module and if it fails return default."""
try:


@ -36,17 +36,9 @@ import functools
import inspect
import itertools
import json
try:
import xmlrpclib
except ImportError:
# NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3
# however the function and object call signatures
# remained the same. This whole try/except block should
# be removed and replaced with a call to six.moves once
# six 1.4.2 is released. See http://bit.ly/1bqrVzu
import xmlrpc.client as xmlrpclib
import six
import six.moves.xmlrpc_client as xmlrpclib
from storyboard.openstack.common import gettextutils
from storyboard.openstack.common import importutils


@ -15,6 +15,7 @@
import contextlib
import errno
import fcntl
import functools
import os
import shutil
@ -28,7 +29,7 @@ import weakref
from oslo.config import cfg
from storyboard.openstack.common import fileutils
from storyboard.openstack.common.gettextutils import _
from storyboard.openstack.common.gettextutils import _, _LE, _LI
from storyboard.openstack.common import log as logging
@ -52,7 +53,7 @@ def set_defaults(lock_path):
cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object):
class _FileLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
@ -79,7 +80,7 @@ class _InterProcessLock(object):
if not os.path.exists(basedir):
fileutils.ensure_tree(basedir)
LOG.info(_('Created lock path: %s'), basedir)
LOG.info(_LI('Created lock path: %s'), basedir)
self.lockfile = open(self.fname, 'w')
@ -90,7 +91,7 @@ class _InterProcessLock(object):
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
LOG.debug(_('Got file lock "%s"'), self.fname)
LOG.debug('Got file lock "%s"', self.fname)
return True
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
@ -114,14 +115,17 @@ class _InterProcessLock(object):
try:
self.unlock()
self.lockfile.close()
LOG.debug('Released file lock "%s"', self.fname)
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
LOG.exception(_LE("Could not release the acquired lock `%s`"),
self.fname)
LOG.debug(_('Released file lock "%s"'), self.fname)
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
def exists(self):
return os.path.exists(self.fname)
def trylock(self):
raise NotImplementedError()
@ -129,7 +133,7 @@ class _InterProcessLock(object):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
class _WindowsLock(_FileLock):
def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
@ -137,7 +141,7 @@ class _WindowsLock(_InterProcessLock):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
class _FcntlLock(_FileLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
@ -145,35 +149,106 @@ class _PosixLock(_InterProcessLock):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
class _PosixLock(object):
def __init__(self, name):
# Hash the name because it's not valid to have POSIX semaphore
# names with things like / in them. Then use base64 to encode
        # the digest() instead of taking the hexdigest() because the
        # result is shorter and most systems can't have shm semaphore
# names longer than 31 characters.
h = hashlib.sha1()
h.update(name.encode('ascii'))
self.name = str((b'/' + base64.urlsafe_b64encode(
h.digest())).decode('ascii'))
def acquire(self, timeout=None):
self.semaphore = posix_ipc.Semaphore(self.name,
flags=posix_ipc.O_CREAT,
initial_value=1)
self.semaphore.acquire(timeout)
return self
def __enter__(self):
self.acquire()
return self
def release(self):
self.semaphore.release()
self.semaphore.close()
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
def exists(self):
try:
semaphore = posix_ipc.Semaphore(self.name)
except posix_ipc.ExistentialError:
return False
else:
semaphore.close()
return True
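
A standalone sketch of the name mangling described above: SHA-1 yields a 20-byte digest, and urlsafe base64 turns that into 28 characters, so with the leading '/' the semaphore name is 29 characters, under the common 31-character limit ('mylock' is an arbitrary example name):

    import base64
    import hashlib

    h = hashlib.sha1()
    h.update('mylock'.encode('ascii'))
    sem_name = (b'/' + base64.urlsafe_b64encode(h.digest())).decode('ascii')
    assert len(sem_name) == 29  # fits in the 31-character shm limit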
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
FileLock = _WindowsLock
else:
import fcntl
import base64
import hashlib
import posix_ipc
InterProcessLock = _PosixLock
FileLock = _FcntlLock
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
def external_lock(name, lock_file_prefix=None):
with internal_lock(name):
LOG.debug(_('Attempting to grab external lock "%(lock)s"'),
{'lock': name})
def _get_lock_path(name, lock_file_prefix, lock_path=None):
# NOTE(mikal): the lock name cannot contain directory
# separators
name = name.replace(os.sep, '_')
if lock_file_prefix:
sep = '' if lock_file_prefix.endswith('-') else '-'
name = '%s%s%s' % (lock_file_prefix, sep, name)
# NOTE(mikal): the lock name cannot contain directory
# separators
name = name.replace(os.sep, '_')
if lock_file_prefix:
sep = '' if lock_file_prefix.endswith('-') else '-'
name = '%s%s%s' % (lock_file_prefix, sep, name)
local_lock_path = lock_path or CONF.lock_path
if not CONF.lock_path:
if not local_lock_path:
# NOTE(bnemec): Create a fake lock path for posix locks so we don't
# unnecessarily raise the RequiredOptError below.
if InterProcessLock is not _PosixLock:
raise cfg.RequiredOptError('lock_path')
local_lock_path = 'posixlock:/'
lock_file_path = os.path.join(CONF.lock_path, name)
return os.path.join(local_lock_path, name)
return InterProcessLock(lock_file_path)
def external_lock(name, lock_file_prefix=None, lock_path=None):
LOG.debug('Attempting to grab external lock "%(lock)s"',
{'lock': name})
lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
# NOTE(bnemec): If an explicit lock_path was passed to us then it
# means the caller is relying on file-based locking behavior, so
# we can't use posix locks for those calls.
if lock_path:
return FileLock(lock_file_path)
return InterProcessLock(lock_file_path)
def remove_external_lock_file(name, lock_file_prefix=None):
"""Remove a external lock file when it's not used anymore
This will be helpful when we have a lot of lock files
"""
with internal_lock(name):
lock_file_path = _get_lock_path(name, lock_file_prefix)
try:
os.remove(lock_file_path)
except OSError:
LOG.info(_LI('Failed to remove file %(file)s'),
{'file': lock_file_path})
def internal_lock(name):
@ -184,12 +259,12 @@ def internal_lock(name):
sem = threading.Semaphore()
_semaphores[name] = sem
LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
LOG.debug('Got semaphore "%(lock)s"', {'lock': name})
return sem
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False):
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
This function yields a `threading.Semaphore` instance (if we don't use
@ -204,15 +279,17 @@ def lock(name, lock_file_prefix=None, external=False):
workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
"""
if external and not CONF.disable_process_locking:
lock = external_lock(name, lock_file_prefix)
else:
lock = internal_lock(name)
with lock:
yield lock
int_lock = internal_lock(name)
with int_lock:
if external and not CONF.disable_process_locking:
ext_lock = external_lock(name, lock_file_prefix, lock_path)
with ext_lock:
yield ext_lock
else:
yield int_lock
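
A hypothetical caller: with external=True the block holds both the internal semaphore and the inter-process lock; `do_critical_work` is a placeholder for the caller's own code:

    with lock('state-update', lock_file_prefix='storyboard-', external=True):
        # Only one thread in this process, and only one process on
        # this host, can be inside this block at a time.
        do_critical_work()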
def synchronized(name, lock_file_prefix=None, external=False):
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@ -240,12 +317,12 @@ def synchronized(name, lock_file_prefix=None, external=False):
@functools.wraps(f)
def inner(*args, **kwargs):
try:
with lock(name, lock_file_prefix, external):
LOG.debug(_('Got semaphore / lock "%(function)s"'),
with lock(name, lock_file_prefix, external, lock_path):
LOG.debug('Got semaphore / lock "%(function)s"',
{'function': f.__name__})
return f(*args, **kwargs)
finally:
LOG.debug(_('Semaphore / lock released "%(function)s"'),
LOG.debug('Semaphore / lock released "%(function)s"',
{'function': f.__name__})
return inner
return wrap
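
A hedged example of the decorator using the new lock_path argument; the function body and `write_settings_to_disk` are placeholders:

    @synchronized('mylock', external=True, lock_path='/var/lock/storyboard')
    def apply_settings(settings):
        # Only one process/thread at a time may run this body.
        write_settings_to_disk(settings)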


@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Openstack logging handler.
"""OpenStack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
@ -304,18 +304,39 @@ class ContextAdapter(BaseLoggerAdapter):
self.logger = logger
self.project = project_name
self.version = version_string
self._deprecated_messages_sent = dict()
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
"""Call this method when a deprecated feature is used.
If the system is configured for fatal deprecations then the message
is logged at the 'critical' level and :class:`DeprecatedConfig` will
be raised.
Otherwise, the message will be logged (once) at the 'warn' level.
:raises: :class:`DeprecatedConfig` if the system is configured for
fatal deprecations.
"""
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
else:
self.warn(stdmsg, *args, **kwargs)
# Using a list because a tuple with dict can't be stored in a set.
sent_args = self._deprecated_messages_sent.setdefault(msg, list())
if args in sent_args:
# Already logged this message, so don't log it again.
return
sent_args.append(args)
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(mrodden): catch any Message/other object and
@ -336,7 +357,7 @@ class ContextAdapter(BaseLoggerAdapter):
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid', None) or
instance_uuid = (extra.get('instance_uuid') or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
@ -432,12 +453,12 @@ def _load_log_config(log_config_append):
raise LogConfigError(log_config_append, str(exc))
def setup(product_name):
def setup(product_name, version='unknown'):
"""Setup logging."""
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf()
_setup_logging_from_conf(product_name, version)
sys.excepthook = _create_logging_excepthook(product_name)
@ -482,7 +503,7 @@ class RFCSysLogHandler(logging.handlers.SysLogHandler):
return msg
def _setup_logging_from_conf():
def _setup_logging_from_conf(project, version):
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
@ -530,9 +551,16 @@ def _setup_logging_from_conf():
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(datefmt=datefmt))
handler.setFormatter(ContextFormatter(project=project,
version=version,
datefmt=datefmt))
log_root.setLevel(logging.DEBUG)
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
@ -583,10 +611,42 @@ class ContextFormatter(logging.Formatter):
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
If available, uses the context value stored in TLS - local.store.context
"""
def __init__(self, *args, **kwargs):
"""Initialize ContextFormatter instance
Takes additional keyword arguments which can be used in the message
format string.
:keyword project: project name
:type project: string
:keyword version: project version
:type version: string
"""
self.project = kwargs.pop('project', 'unknown')
self.version = kwargs.pop('version', 'unknown')
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# store project info
record.project = self.project
record.version = self.version
# store request info
context = getattr(local.store, 'context', None)
if context:
d = _dictify_context(context)
for k, v in d.items():
setattr(record, k, v)
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
@ -594,7 +654,7 @@ class ContextFormatter(logging.Formatter):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
if record.__dict__.get('request_id'):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string


@ -151,7 +151,8 @@ def execute(*cmd, **kwargs):
while attempts > 0:
attempts -= 1
try:
LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd))
LOG.log(loglevel, 'Running cmd (subprocess): %s',
' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
@ -184,7 +185,7 @@ def execute(*cmd, **kwargs):
break
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
LOG.log(loglevel, _('Result was %s') % _returncode)
LOG.log(loglevel, 'Result was %s' % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
@ -196,7 +197,7 @@ def execute(*cmd, **kwargs):
if not attempts:
raise
else:
LOG.log(loglevel, _('%r failed. Retrying.'), cmd)
LOG.log(loglevel, '%r failed. Retrying.', cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
@ -235,7 +236,7 @@ def trycmd(*args, **kwargs):
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug(_('Running cmd (SSH): %s'), cmd)
LOG.debug('Running cmd (SSH): %s', cmd)
if addl_env:
raise InvalidArgumentError(_('Environment not supported over SSH'))
@ -256,7 +257,7 @@ def ssh_execute(ssh, cmd, process_input=None,
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
LOG.debug('Result was %s' % exit_status)
if check_exit_code and exit_status != 0:
raise ProcessExecutionError(exit_code=exit_status,
stdout=stdout,


@ -13,10 +13,22 @@
# License for the specific language governing permissions and limitations
# under the License.
##############################################################################
##############################################################################
##
## DO NOT MODIFY THIS FILE
##
## This file is being graduated to the storyboardtest library. Please make all
## changes there, and only backport critical fixes here. - dhellmann
##
##############################################################################
##############################################################################
"""Common utilities used in testing"""
import logging
import os
import tempfile
import fixtures
import testtools
@ -34,6 +46,7 @@ class BaseTestCase(testtools.TestCase):
self._fake_logs()
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.tempdirs = []
def _set_timeout(self):
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
@ -69,3 +82,18 @@ class BaseTestCase(testtools.TestCase):
)
else:
logging.basicConfig(format=_LOG_FORMAT, level=level)
def create_tempfiles(self, files, ext='.conf'):
tempfiles = []
for (basename, contents) in files:
if not os.path.isabs(basename):
(fd, path) = tempfile.mkstemp(prefix=basename, suffix=ext)
else:
path = basename + ext
fd = os.open(path, os.O_CREAT | os.O_WRONLY)
tempfiles.append(path)
try:
os.write(fd, contents)
finally:
os.close(fd)
return tempfiles
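
A sketch of calling the helper from a test; 'policy' is a made-up basename and the contents are arbitrary:

    paths = self.create_tempfiles([('policy', b'{"default": ""}')],
                                  ext='.json')
    # One temp file was written; its path is returned for the
    # code under test to read.
    self.assertTrue(paths[0].endswith('.json'))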


@ -36,19 +36,6 @@ _TRUE_VALUES = ('true', '1', 'yes')
_DB_CACHE = None
test_opts = [
cfg.StrOpt('sqlite_clean_db',
default='clean.sqlite',
help='File name of clean sqlite db')]
CONF.register_opts(test_opts)
CONF.import_opt('connection',
'storyboard.openstack.common.db.sqlalchemy.session',
group='database')
CONF.import_opt('sqlite_db',
'storyboard.openstack.common.db.sqlalchemy.session')
logging.setup('storyboard')


@ -13,8 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
eventlet.monkey_patch(os=False)
eventlet.monkey_patch(os=False)
import os
import warnings
@ -24,7 +24,7 @@ from alembic import config as alembic_config
import fixtures
from oslo.config import cfg
from sqlalchemy.exc import SADeprecationWarning
from storyboard.openstack.common.db.sqlalchemy import session
from storyboard.db import api as db_api
CONF = cfg.CONF
@ -32,7 +32,6 @@ warnings.simplefilter("ignore", SADeprecationWarning)
class Database(fixtures.Fixture):
def __init__(self):
config = alembic_config.Config(os.path.join(
os.path.dirname(__file__),
@ -42,7 +41,7 @@ class Database(fixtures.Fixture):
'storyboard.db.migration:alembic_migrations')
config.storyboard_config = CONF
self.engine = session.get_engine()
self.engine = db_api.get_engine()
conn = self.engine.connect()
self._sync_db(config, conn)
@ -56,9 +55,9 @@ class Database(fixtures.Fixture):
return _script._upgrade_revs('head', rev)
with environment.EnvironmentContext(
config,
_script,
fn=upgrade,
config,
_script,
fn=upgrade,
):
context.configure(connection=conn)
with context.begin_transaction():
@ -67,9 +66,9 @@ class Database(fixtures.Fixture):
def setUp(self):
super(Database, self).setUp()
session.get_session()
engine = session.get_engine()
db_api.get_session()
engine = db_api.get_engine()
conn = engine.connect()
conn.connection.executescript(self._DB)
self.addCleanup(session.cleanup)
self.addCleanup(db_api.cleanup)


@ -31,11 +31,9 @@ from alembic import config as alembic_config
from alembic import migration
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import sqlalchemy
import sqlalchemy.exc
from storyboard.db import api as db_api
import storyboard.db.migration
from storyboard.openstack.common.db.sqlalchemy import session
from storyboard.openstack.common import lockutils
from storyboard.openstack.common import log as logging
from storyboard.openstack.common import processutils
@ -52,6 +50,7 @@ def _get_connect_string(backend, user, passwd, database):
"""Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
if backend == "postgres":
backend = "postgresql+psycopg2"
elif backend == "mysql":
@ -65,7 +64,8 @@ def _get_connect_string(backend, user, passwd, database):
def _is_backend_avail(backend, user, passwd, database):
try:
connect_uri = _get_connect_string(backend, user, passwd, database)
engine = sqlalchemy.create_engine(connect_uri)
CONF.database.connection = connect_uri
engine = db_api.get_engine()
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
@ -228,7 +228,7 @@ class BaseMigrationTestCase(base.TestCase):
self.engines = {}
for key, value in self.test_databases.items():
self.engines[key] = sqlalchemy.create_engine(value)
self.engines[key] = db_api.get_engine()
# NOTE(jhesketh): We only need to make sure the databases are created
# not necessarily clean of tables.
@ -382,7 +382,7 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
self.engines = {}
for key, value in self.test_databases.items():
self.engines[key] = sqlalchemy.create_engine(value)
self.engines[key] = db_api.get_engine()
self._create_databases()
@ -394,7 +394,7 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
database functionality (reset default settings and session cleanup).
"""
CONF.set_override('connection', str(engine.url), group='database')
session.cleanup()
db_api.cleanup()
def _test_mysql_opportunistically(self):
# Test that table creation on mysql only builds InnoDB tables
@ -406,7 +406,7 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
self.DATABASE)
(user, password, database, host) = \
get_mysql_connection_info(urlparse.urlparse(connect_string))
engine = sqlalchemy.create_engine(connect_string)
engine = db_api.get_engine()
self.engines[database] = engine
self.test_databases[database] = connect_string
@ -435,7 +435,7 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
# automatically in tearDown so no need to clean it up here.
connect_string = _get_connect_string("postgres", self.USER,
self.PASSWD, self.DATABASE)
engine = sqlalchemy.create_engine(connect_string)
engine = db_api.get_engine()
(user, password, database, host) = \
get_mysql_connection_info(urlparse.urlparse(connect_string))
self.engines[database] = engine
@ -453,11 +453,11 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
"""
self.ALEMBIC_CONFIG.stdout = buf = io.StringIO()
CONF.set_override('connection', str(engine.url), group='database')
session.cleanup()
db_api.cleanup()
getattr(command, alembic_command)(*args, **kwargs)
res = buf.getvalue().strip()
LOG.debug('Alembic command `%s` returns: %s' % (alembic_command, res))
session.cleanup()
db_api.cleanup()
return res
def _get_alembic_versions(self, engine):

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from storyboard.db import api as dbapi
from storyboard.db import api as db_api
from storyboard.tests import base
@ -50,7 +50,7 @@ class ProjectsTest(BaseDbTestCase):
}
def test_save_project(self):
self._test_create(self.project_01, dbapi.project_create)
self._test_create(self.project_01, db_api.project_create)
def test_update_project(self):
delta = {
@ -58,7 +58,7 @@ class ProjectsTest(BaseDbTestCase):
'description': u'New Description'
}
self._test_update(self.project_01, delta,
dbapi.project_create, dbapi.project_update)
db_api.project_create, db_api.project_update)
class StoriesTest(BaseDbTestCase):
@ -72,7 +72,7 @@ class StoriesTest(BaseDbTestCase):
}
def test_create_story(self):
self._test_create(self.story_01, dbapi.story_create)
self._test_create(self.story_01, db_api.story_create)
def test_update_story(self):
delta = {
@ -80,7 +80,7 @@ class StoriesTest(BaseDbTestCase):
'description': u'New Description'
}
self._test_update(self.story_01, delta,
dbapi.story_create, dbapi.story_update)
db_api.story_create, db_api.story_update)
class TasksTest(BaseDbTestCase):
@ -95,7 +95,7 @@ class TasksTest(BaseDbTestCase):
}
def test_create_task(self):
self._test_create(self.task_01, dbapi.task_create)
self._test_create(self.task_01, db_api.task_create)
def test_update_task(self):
delta = {
@ -104,4 +104,4 @@ class TasksTest(BaseDbTestCase):
}
self._test_update(self.task_01, delta,
dbapi.task_create, dbapi.task_update)
db_api.task_create, db_api.task_update)


@ -16,7 +16,7 @@
import sys
import mock
from storyboard.openstack.common.db.sqlalchemy import session as db_session
from storyboard.db import api
import testscenarios
from storyboard.db.migration import cli
@ -36,7 +36,7 @@ class TestLoadProjects(base.FunctionalTest):
def test_cli(self):
with mock.patch.object(sys, 'argv', self.argv):
cli.main()
session = db_session.get_session(sqlite_fk=True)
session = api.get_session()
project_groups = session.query(models.ProjectGroup).all()
projects = session.query(models.Project).all()
@ -53,7 +53,7 @@ class TestLoadProjects(base.FunctionalTest):
# call again and nothing should change
cli.main()
session = db_session.get_session(sqlite_fk=True)
session = api.get_session()
projects = session.query(models.Project).all()
self.assertIsNotNone(projects)


@ -6,9 +6,11 @@ fixtures>=0.3.14
mock>=1.0
python-subunit
oslo.sphinx
testrepository>=0.0.17
testrepository>=0.0.18
testscenarios>=0.4,<0.5
testtools>=0.9.32
testtools>=0.9.34
posix_ipc
# Some of the tests use real MySQL and Postgres databases
MySQL-python