Sync latest oslo.db code

Common db code was updated in oslo. The most important change is that
engine instances are no longer stored in oslo.db (ce69e7f).
This patch moves the methods `get_engine` and `get_session` to the
module `heat.db.sqlalchemy.api`.

Latest oslo commit related to the db module:
2fd457bf2ccbeb2b84ffb204778b6417cd5405ba

Change-Id: Iaa2e9ba26e824c678c698914170e3dffbf1c5c95
Andrey Kurilin 2014-02-24 16:46:29 +02:00
parent 155314d236
commit cfcbc994d3
19 changed files with 1190 additions and 606 deletions
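
For context: after this sync, oslo.db no longer keeps a module-level engine and sessionmaker; each consumer builds its own through an EngineFacade, exactly as the new heat.db.sqlalchemy.api below does. A minimal self-contained sketch of that pattern (the class body and the in-memory SQLite URL are illustrative, not code from this commit):

import sqlalchemy
from sqlalchemy import orm


class EngineFacade(object):
    """Stand-in for oslo.db's EngineFacade: one engine and one
    sessionmaker owned by the caller instead of module globals."""

    def __init__(self, sql_connection, **kwargs):
        self._engine = sqlalchemy.create_engine(sql_connection)
        self._maker = orm.sessionmaker(bind=self._engine,
                                       expire_on_commit=False)

    def get_engine(self):
        return self._engine

    def get_session(self, **kwargs):
        return self._maker(**kwargs)


_facade = None


def get_facade():
    # Build the facade lazily on first use, mirroring the new
    # heat.db.sqlalchemy.api shown in the diff below.
    global _facade
    if not _facade:
        _facade = EngineFacade('sqlite://')  # illustrative URL
    return _facade


get_engine = lambda: get_facade().get_engine()
get_session = lambda: get_facade().get_session()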


@@ -153,14 +153,6 @@
#max_json_body_size=1048576 #max_json_body_size=1048576
#
# Options defined in heat.db.api
#
# The backend to use for db. (string value)
#db_backend=sqlalchemy
# #
# Options defined in heat.engine.clients # Options defined in heat.engine.clients
# #
@@ -179,17 +171,6 @@
#loadbalancer_template=<None> #loadbalancer_template=<None>
#
# Options defined in heat.openstack.common.db.sqlalchemy.session
#
# the filename to use with sqlite (string value)
#sqlite_db=heat.sqlite
# If true, use synchronous mode for sqlite (boolean value)
#sqlite_synchronous=true
# #
# Options defined in heat.openstack.common.eventlet_backdoor # Options defined in heat.openstack.common.eventlet_backdoor
# #
@@ -801,30 +782,34 @@
[database] [database]
# #
# Options defined in heat.openstack.common.db.api # Options defined in heat.openstack.common.db.options
# #
# The file name to use with SQLite (string value)
#sqlite_db=heat.sqlite
# If True, SQLite uses synchronous mode (boolean value)
#sqlite_synchronous=true
# The backend to use for db (string value) # The backend to use for db (string value)
# Deprecated group/name - [DEFAULT]/db_backend # Deprecated group/name - [DEFAULT]/db_backend
#backend=sqlalchemy #backend=sqlalchemy
#
# Options defined in heat.openstack.common.db.sqlalchemy.session
#
# The SQLAlchemy connection string used to connect to the # The SQLAlchemy connection string used to connect to the
# database (string value) # database (string value)
# Deprecated group/name - [DEFAULT]/sql_connection # Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection # Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection # Deprecated group/name - [sql]/connection
#connection=sqlite:////heat/openstack/common/db/$sqlite_db #connection=<None>
# The SQLAlchemy connection string used to connect to the # The SQL mode to be used for MySQL sessions. This option,
# slave database (string value) # including the default, overrides any server-set SQL mode. To
#slave_connection= # use whatever SQL mode is set by the server configuration,
# set this to no value. Example: mysql_sql_mode= (string
# value)
#mysql_sql_mode=TRADITIONAL
# timeout before idle sql connections are reaped (integer # Timeout before idle sql connections are reaped (integer
# value) # value)
# Deprecated group/name - [DEFAULT]/sql_idle_timeout # Deprecated group/name - [DEFAULT]/sql_idle_timeout
# Deprecated group/name - [DATABASE]/sql_idle_timeout # Deprecated group/name - [DATABASE]/sql_idle_timeout
@@ -843,13 +828,13 @@
# Deprecated group/name - [DATABASE]/sql_max_pool_size # Deprecated group/name - [DATABASE]/sql_max_pool_size
#max_pool_size=<None> #max_pool_size=<None>
# maximum db connection retries during startup. (setting -1 # Maximum db connection retries during startup. (setting -1
# implies an infinite retry count) (integer value) # implies an infinite retry count) (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_retries # Deprecated group/name - [DEFAULT]/sql_max_retries
# Deprecated group/name - [DATABASE]/sql_max_retries # Deprecated group/name - [DATABASE]/sql_max_retries
#max_retries=10 #max_retries=10
# interval between retries of opening a sql connection # Interval between retries of opening a sql connection
# (integer value) # (integer value)
# Deprecated group/name - [DEFAULT]/sql_retry_interval # Deprecated group/name - [DEFAULT]/sql_retry_interval
# Deprecated group/name - [DATABASE]/reconnect_interval # Deprecated group/name - [DATABASE]/reconnect_interval
@@ -876,6 +861,25 @@
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout # Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
#pool_timeout=<None> #pool_timeout=<None>
# Enable the experimental use of database reconnect on
# connection lost (boolean value)
#use_db_reconnect=false
# seconds between db connection retries (integer value)
#db_retry_interval=1
# Whether to increase interval between db connection retries,
# up to db_max_retry_interval (boolean value)
#db_inc_retry_interval=true
# max seconds between db connection retries, if
# db_inc_retry_interval is enabled (integer value)
#db_max_retry_interval=10
# maximum db connection retries before error is raised.
# (setting -1 implies an infinite retry count) (integer value)
#db_max_retries=20
[ec2authtoken] [ec2authtoken]


@@ -32,7 +32,7 @@ CONF = cfg.CONF
def do_db_version(): def do_db_version():
"""Print database's current migration level.""" """Print database's current migration level."""
print(api.db_version()) print(api.db_version(api.get_engine()))
def do_db_sync(): def do_db_sync():
@@ -40,7 +40,7 @@ def do_db_sync():
Place a database under migration control and upgrade, Place a database under migration control and upgrade,
creating first if necessary. creating first if necessary.
""" """
api.db_sync(CONF.command.version) api.db_sync(api.get_engine(), CONF.command.version)
def purge_deleted(): def purge_deleted():


@@ -28,17 +28,19 @@ from oslo.config import cfg
from heat.openstack.common.db import api as db_api from heat.openstack.common.db import api as db_api
db_opts = [
cfg.StrOpt('db_backend',
default='sqlalchemy',
help='The backend to use for db.')]
CONF = cfg.CONF CONF = cfg.CONF
CONF.register_opts(db_opts) CONF.import_opt('backend', 'heat.openstack.common.db.options',
group='database')
_BACKEND_MAPPING = {'sqlalchemy': 'heat.db.sqlalchemy.api'} _BACKEND_MAPPING = {'sqlalchemy': 'heat.db.sqlalchemy.api'}
IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING) IMPL = db_api.DBAPI(CONF.database.backend, backend_mapping=_BACKEND_MAPPING,
lazy=True)
def get_engine():
return IMPL.get_engine()
def get_session(): def get_session():
@@ -261,11 +263,11 @@ def software_deployment_delete(context, deployment_id):
return IMPL.software_deployment_delete(context, deployment_id) return IMPL.software_deployment_delete(context, deployment_id)
def db_sync(version=None): def db_sync(engine, version=None):
"""Migrate the database to `version` or the most recent version.""" """Migrate the database to `version` or the most recent version."""
return IMPL.db_sync(version=version) return IMPL.db_sync(engine, version=version)
def db_version(): def db_version(engine):
"""Display the current database version.""" """Display the current database version."""
return IMPL.db_version() return IMPL.db_version(engine)
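
The signature change above repeats throughout the patch: migration entry points now receive the engine explicitly instead of resolving a global inside oslo.db. A hedged usage sketch against the updated heat.db.api (the sync_db wrapper is hypothetical):

from heat.db import api


def sync_db(version=None):
    # Fetch the engine once through the new accessor and pass it
    # explicitly to the migration calls.
    engine = api.get_engine()
    api.db_sync(engine, version=version)
    return api.db_version(engine)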


@@ -31,13 +31,28 @@ from heat.openstack.common.gettextutils import _
cfg.CONF.import_opt('max_events_per_stack', 'heat.common.config') cfg.CONF.import_opt('max_events_per_stack', 'heat.common.config')
get_engine = db_session.get_engine CONF = cfg.CONF
get_session = db_session.get_session CONF.import_opt('max_events_per_stack', 'heat.common.config')
CONF.import_opt('connection', 'heat.openstack.common.db.options',
group='database')
_facade = None
def get_facade():
global _facade
if not _facade:
_facade = db_session.EngineFacade(
CONF.database.connection, **dict(CONF.database.iteritems()))
return _facade
get_engine = lambda: get_facade().get_engine()
get_session = lambda: get_facade().get_session()
def get_backend(): def get_backend():
"""The backend is this module itself.""" """The backend is this module itself."""
return sys.modules[__name__] return sys.modules[__name__]
@@ -743,11 +758,11 @@ def purge_deleted(age, granularity='days'):
engine.execute(user_creds_del) engine.execute(user_creds_del)
def db_sync(version=None): def db_sync(engine, version=None):
"""Migrate the database to `version` or the most recent version.""" """Migrate the database to `version` or the most recent version."""
return migration.db_sync(version=version) return migration.db_sync(engine, version=version)
def db_version(): def db_version(engine):
"""Display the current database version.""" """Display the current database version."""
return migration.db_version() return migration.db_version(engine)


@@ -19,19 +19,20 @@ from heat.openstack.common.db.sqlalchemy import migration as oslo_migration
INIT_VERSION = 14 INIT_VERSION = 14
def db_sync(version=None): def db_sync(engine, version=None):
path = os.path.join(os.path.abspath(os.path.dirname(__file__)), path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo') 'migrate_repo')
return oslo_migration.db_sync(path, version, init_version=INIT_VERSION) return oslo_migration.db_sync(engine, path, version,
init_version=INIT_VERSION)
def db_version(): def db_version(engine):
path = os.path.join(os.path.abspath(os.path.dirname(__file__)), path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo') 'migrate_repo')
return oslo_migration.db_version(path, INIT_VERSION) return oslo_migration.db_version(engine, path, INIT_VERSION)
def db_version_control(version=None): def db_version_control(engine, version=None):
path = os.path.join(os.path.abspath(os.path.dirname(__file__)), path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo') 'migrate_repo')
return oslo_migration.db_version_control(path, version) return oslo_migration.db_version_control(engine, path, version)


@@ -24,11 +24,14 @@ from sqlalchemy.orm.session import Session
from heat.db.sqlalchemy.types import Json from heat.db.sqlalchemy.types import Json
from heat.openstack.common.db.sqlalchemy import models from heat.openstack.common.db.sqlalchemy import models
from heat.openstack.common.db.sqlalchemy import session
from heat.openstack.common import timeutils from heat.openstack.common import timeutils
BASE = declarative_base() BASE = declarative_base()
get_session = session.get_session
def get_session():
from heat.db.sqlalchemy import api as db_api
return db_api.get_session()
class HeatBase(models.ModelBase, models.TimestampMixin): class HeatBase(models.ModelBase, models.TimestampMixin):


@@ -39,7 +39,7 @@ class LazyPluggable(object):
return getattr(backend, key) return getattr(backend, key)
IMPL = LazyPluggable('db_backend', IMPL = LazyPluggable('backend',
sqlalchemy='heat.db.sqlalchemy.api') sqlalchemy='heat.db.sqlalchemy.api')


@@ -16,43 +16,148 @@
"""Multiple DB API backend support. """Multiple DB API backend support.
Supported configuration options:
The following two parameters are in the 'database' group:
`backend`: DB backend name or full module path to DB backend module.
A DB backend module should implement a method named 'get_backend' which A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB takes no arguments. The method can return any object that implements DB
API methods. API methods.
""" """
from oslo.config import cfg import functools
import logging
import threading
import time
from heat.openstack.common.db import exception
from heat.openstack.common.gettextutils import _LE
from heat.openstack.common import importutils from heat.openstack.common import importutils
db_opts = [ LOG = logging.getLogger(__name__)
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The backend to use for db'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts, 'database') def safe_for_db_retry(f):
"""Enable db-retry for decorated function, if config option enabled."""
f.__dict__['enable_retry'] = True
return f
class wrap_db_retry(object):
"""Retry db.api methods, if DBConnectionError() raised
Retry decorated db.api methods. If we enabled `use_db_reconnect`
in config, this decorator will be applied to all db.api functions,
marked with @safe_for_db_retry decorator.
Decorator catches DBConnectionError() and retries the function in a
loop until it succeeds, or until the maximum retry count is reached.
"""
def __init__(self, retry_interval, max_retries, inc_retry_interval,
max_retry_interval):
super(wrap_db_retry, self).__init__()
self.retry_interval = retry_interval
self.max_retries = max_retries
self.inc_retry_interval = inc_retry_interval
self.max_retry_interval = max_retry_interval
def __call__(self, f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
next_interval = self.retry_interval
remaining = self.max_retries
while True:
try:
return f(*args, **kwargs)
except exception.DBConnectionError as e:
if remaining == 0:
LOG.exception(_LE('DB exceeded retry limit.'))
raise exception.DBError(e)
if remaining != -1:
remaining -= 1
LOG.exception(_LE('DB connection error.'))
# NOTE(vsergeyev): We are using patched time module, so
# this effectively yields the execution
# context to another green thread.
time.sleep(next_interval)
if self.inc_retry_interval:
next_interval = min(
next_interval * 2,
self.max_retry_interval
)
return wrapper
class DBAPI(object): class DBAPI(object):
def __init__(self, backend_mapping=None): def __init__(self, backend_name, backend_mapping=None, lazy=False,
if backend_mapping is None: **kwargs):
backend_mapping = {} """Initialize the chosen DB API backend.
backend_name = CONF.database.backend
# Import the untranslated name if we don't have a :param backend_name: name of the backend to load
# mapping. :type backend_name: str
backend_path = backend_mapping.get(backend_name, backend_name)
backend_mod = importutils.import_module(backend_path) :param backend_mapping: backend name -> module/class to load mapping
self.__backend = backend_mod.get_backend() :type backend_mapping: dict
:param lazy: load the DB backend lazily on the first DB API method call
:type lazy: bool
Keyword arguments:
:keyword use_db_reconnect: retry DB transactions on disconnect or not
:type use_db_reconnect: bool
:keyword retry_interval: seconds between transaction retries
:type retry_interval: int
:keyword inc_retry_interval: increase retry interval or not
:type inc_retry_interval: bool
:keyword max_retry_interval: max interval value between retries
:type max_retry_interval: int
:keyword max_retries: max number of retries before an error is raised
:type max_retries: int
"""
self._backend = None
self._backend_name = backend_name
self._backend_mapping = backend_mapping or {}
self._lock = threading.Lock()
if not lazy:
self._load_backend()
self.use_db_reconnect = kwargs.get('use_db_reconnect', False)
self.retry_interval = kwargs.get('retry_interval', 1)
self.inc_retry_interval = kwargs.get('inc_retry_interval', True)
self.max_retry_interval = kwargs.get('max_retry_interval', 10)
self.max_retries = kwargs.get('max_retries', 20)
def _load_backend(self):
with self._lock:
if not self._backend:
# Import the untranslated name if we don't have a mapping
backend_path = self._backend_mapping.get(self._backend_name,
self._backend_name)
backend_mod = importutils.import_module(backend_path)
self._backend = backend_mod.get_backend()
def __getattr__(self, key): def __getattr__(self, key):
return getattr(self.__backend, key) if not self._backend:
self._load_backend()
attr = getattr(self._backend, key)
if not hasattr(attr, '__call__'):
return attr
# NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
# DB API methods, decorated with @safe_for_db_retry
# on disconnect.
if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
attr = wrap_db_retry(
retry_interval=self.retry_interval,
max_retries=self.max_retries,
inc_retry_interval=self.inc_retry_interval,
max_retry_interval=self.max_retry_interval)(attr)
return attr
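
As a consumption sketch (the backend module and its function are hypothetical, not part of this commit): a backend function opts into retries with @safe_for_db_retry, and DBAPI wraps it in wrap_db_retry on attribute lookup once use_db_reconnect is enabled:

# hypothetical backend module, e.g. myproject/db/backend.py
import sys

from heat.openstack.common.db import api as db_api


@db_api.safe_for_db_retry
def stack_get(context, stack_id):
    # A real backend would query the database here; a raised
    # DBConnectionError makes wrap_db_retry re-invoke the call.
    return {'id': stack_id}


def get_backend():
    return sys.modules[__name__]


# Elsewhere:
#   IMPL = db_api.DBAPI('myproject.db.backend',
#                       use_db_reconnect=True, max_retries=5)
#   IMPL.stack_get(ctxt, 'abc')  # wrapped in wrap_db_retry on lookup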


@@ -17,6 +17,8 @@
"""DB related custom exceptions.""" """DB related custom exceptions."""
import six
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
@@ -24,7 +26,7 @@ class DBError(Exception):
"""Wraps an implementation specific exception.""" """Wraps an implementation specific exception."""
def __init__(self, inner_exception=None): def __init__(self, inner_exception=None):
self.inner_exception = inner_exception self.inner_exception = inner_exception
super(DBError, self).__init__(str(inner_exception)) super(DBError, self).__init__(six.text_type(inner_exception))
class DBDuplicateEntry(DBError): class DBDuplicateEntry(DBError):
@@ -47,7 +49,7 @@ class DBInvalidUnicodeParameter(Exception):
class DbMigrationError(DBError): class DbMigrationError(DBError):
"""Wraps migration specific exception.""" """Wraps migration specific exception."""
def __init__(self, message=None): def __init__(self, message=None):
super(DbMigrationError, self).__init__(str(message)) super(DbMigrationError, self).__init__(message)
class DBConnectionError(DBError): class DBConnectionError(DBError):
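
The six.text_type change matters on Python 2, where str() on an exception carrying a non-ASCII unicode message raises UnicodeEncodeError. A small self-contained illustration (the sample message is made up):

import six


class DBError(Exception):
    """Wraps an implementation specific exception."""
    def __init__(self, inner_exception=None):
        self.inner_exception = inner_exception
        super(DBError, self).__init__(six.text_type(inner_exception))


# str(inner_exception) would raise UnicodeEncodeError on Python 2 here;
# six.text_type (unicode on 2, str on 3) handles both interpreters.
err = DBError(ValueError(u'sn\u00f6'))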


@@ -0,0 +1,171 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo.config import cfg
database_opts = [
cfg.StrOpt('sqlite_db',
deprecated_group='DEFAULT',
default='heat.sqlite',
help='The file name to use with SQLite'),
cfg.BoolOpt('sqlite_synchronous',
deprecated_group='DEFAULT',
default=True,
help='If True, SQLite uses synchronous mode'),
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The backend to use for db'),
cfg.StrOpt('connection',
help='The SQLAlchemy connection string used to connect to the '
'database',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('mysql_sql_mode',
default='TRADITIONAL',
help='The SQL mode to be used for MySQL sessions. '
'This option, including the default, overrides any '
'server-set SQL mode. To use whatever SQL mode '
'is set by the server configuration, '
'set this to no value. Example: mysql_sql_mode='),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='Timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='Maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='Interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
cfg.BoolOpt('use_db_reconnect',
default=False,
help='Enable the experimental use of database reconnect '
'on connection lost'),
cfg.IntOpt('db_retry_interval',
default=1,
help='seconds between db connection retries'),
cfg.BoolOpt('db_inc_retry_interval',
default=True,
help='Whether to increase interval between db connection '
'retries, up to db_max_retry_interval'),
cfg.IntOpt('db_max_retry_interval',
default=10,
help='max seconds between db connection retries, if '
'db_inc_retry_interval is enabled'),
cfg.IntOpt('db_max_retries',
default=20,
help='maximum db connection retries before error is raised. '
'(setting -1 implies an infinite retry count)'),
]
CONF = cfg.CONF
CONF.register_opts(database_opts, 'database')
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def list_opts():
"""Returns a list of oslo.config options available in the library.
The returned list includes all oslo.config options which may be registered
at runtime by the library.
Each element of the list is a tuple. The first element is the name of the
group under which the list of elements in the second element will be
registered. A group name of None corresponds to the [DEFAULT] group in
config files.
The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users by this library.
:returns: a list of (group_name, opts) tuples
"""
return [('database', copy.deepcopy(database_opts))]
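
A short usage sketch for the new options module (the connection string and pool size are illustrative values):

from oslo.config import cfg

from heat.openstack.common.db import options

# Override the library defaults before configuration files are parsed.
options.set_defaults(sql_connection='mysql://heat:secret@localhost/heat',
                     sqlite_db='heat.sqlite',
                     max_pool_size=10)

cfg.CONF([], project='heat')
print(cfg.CONF.database.connection)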


@@ -39,51 +39,19 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE. # THE SOFTWARE.
import distutils.version as dist_version
import os import os
import re import re
import migrate
from migrate.changeset import ansisql from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite from migrate.changeset.databases import sqlite
from migrate.versioning import util as migrate_util from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy import sqlalchemy
from sqlalchemy.schema import UniqueConstraint from sqlalchemy.schema import UniqueConstraint
from heat.openstack.common.db import exception from heat.openstack.common.db import exception
from heat.openstack.common.db.sqlalchemy import session as db_session from heat.openstack.common.gettextutils import _
from heat.openstack.common.gettextutils import _ # noqa
@migrate_util.decorator
def patched_with_engine(f, *a, **kw):
url = a[0]
engine = migrate_util.construct_engine(url, **kw)
try:
kw['engine'] = engine
return f(*a, **kw)
finally:
if isinstance(engine, migrate_util.Engine) and engine is not url:
migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
engine.dispose()
# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
migrate_util.with_engine = patched_with_engine
# NOTE(jkoelker) Delay importing migrate until we are patched
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
get_engine = db_session.get_engine
def _get_unique_constraints(self, table): def _get_unique_constraints(self, table):
@@ -200,11 +168,12 @@ def patch_migrate():
sqlite.SQLiteConstraintGenerator) sqlite.SQLiteConstraintGenerator)
def db_sync(abs_path, version=None, init_version=0, sanity_check=True): def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True):
"""Upgrade or downgrade a database. """Upgrade or downgrade a database.
Function runs the upgrade() or downgrade() functions in change scripts. Function runs the upgrade() or downgrade() functions in change scripts.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository. :param abs_path: Absolute path to migrate repository.
:param version: Database will upgrade/downgrade until this version. :param version: Database will upgrade/downgrade until this version.
If None - database will update to the latest If None - database will update to the latest
@@ -220,19 +189,24 @@ def db_sync(abs_path, version=None, init_version=0, sanity_check=True):
raise exception.DbMigrationError( raise exception.DbMigrationError(
message=_("version should be an integer")) message=_("version should be an integer"))
current_version = db_version(abs_path, init_version) current_version = db_version(engine, abs_path, init_version)
repository = _find_migrate_repo(abs_path) repository = _find_migrate_repo(abs_path)
if sanity_check: if sanity_check:
_db_schema_sanity_check() _db_schema_sanity_check(engine)
if version is None or version > current_version: if version is None or version > current_version:
return versioning_api.upgrade(get_engine(), repository, version) return versioning_api.upgrade(engine, repository, version)
else: else:
return versioning_api.downgrade(get_engine(), repository, return versioning_api.downgrade(engine, repository,
version) version)
def _db_schema_sanity_check(): def _db_schema_sanity_check(engine):
engine = get_engine() """Ensure all database tables were created with required parameters.
:param engine: SQLAlchemy engine instance for a given database
"""
if engine.name == 'mysql': if engine.name == 'mysql':
onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION ' onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
'from information_schema.TABLES ' 'from information_schema.TABLES '
@@ -254,23 +228,23 @@ def _db_schema_sanity_check():
) % ','.join(table_names)) ) % ','.join(table_names))
def db_version(abs_path, init_version): def db_version(engine, abs_path, init_version):
"""Show the current version of the repository. """Show the current version of the repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository :param abs_path: Absolute path to migrate repository
:param version: Initial database version :param version: Initial database version
""" """
repository = _find_migrate_repo(abs_path) repository = _find_migrate_repo(abs_path)
try: try:
return versioning_api.db_version(get_engine(), repository) return versioning_api.db_version(engine, repository)
except versioning_exceptions.DatabaseNotControlledError: except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData() meta = sqlalchemy.MetaData()
engine = get_engine()
meta.reflect(bind=engine) meta.reflect(bind=engine)
tables = meta.tables tables = meta.tables
if len(tables) == 0: if len(tables) == 0 or 'alembic_version' in tables:
db_version_control(abs_path, init_version) db_version_control(engine, abs_path, version=init_version)
return versioning_api.db_version(get_engine(), repository) return versioning_api.db_version(engine, repository)
else: else:
raise exception.DbMigrationError( raise exception.DbMigrationError(
message=_( message=_(
@@ -279,17 +253,18 @@ def db_version(abs_path, init_version):
"manually.")) "manually."))
def db_version_control(abs_path, version=None): def db_version_control(engine, abs_path, version=None):
"""Mark a database as under this repository's version control. """Mark a database as under this repository's version control.
Once a database is under version control, schema changes should Once a database is under version control, schema changes should
only be done via change scripts in this repository. only be done via change scripts in this repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository :param abs_path: Absolute path to migrate repository
:param version: Initial database version :param version: Initial database version
""" """
repository = _find_migrate_repo(abs_path) repository = _find_migrate_repo(abs_path)
versioning_api.version_control(get_engine(), repository, version) versioning_api.version_control(engine, repository, version)
return version return version


@@ -27,18 +27,16 @@ from sqlalchemy import Column, Integer
from sqlalchemy import DateTime from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper from sqlalchemy.orm import object_mapper
from heat.openstack.common.db.sqlalchemy import session as sa
from heat.openstack.common import timeutils from heat.openstack.common import timeutils
class ModelBase(object): class ModelBase(six.Iterator):
"""Base class for models.""" """Base class for models."""
__table_initialized__ = False __table_initialized__ = False
def save(self, session=None): def save(self, session):
"""Save this object.""" """Save this object."""
if not session:
session = sa.get_session()
# NOTE(boris-42): This part of the code should look like: # NOTE(boris-42): This part of the code should look like:
# session.add(self) # session.add(self)
# session.flush() # session.flush()
@@ -81,10 +79,14 @@ class ModelBase(object):
self._i = iter(columns) self._i = iter(columns)
return self return self
def next(self): # In Python 3, __next__() has replaced next().
def __next__(self):
n = six.advance_iterator(self._i) n = six.advance_iterator(self._i)
return n, getattr(self, n) return n, getattr(self, n)
def next(self):
return self.__next__()
def update(self, values): def update(self, values):
"""Make the model object behave like a dict.""" """Make the model object behave like a dict."""
for k, v in six.iteritems(values): for k, v in six.iteritems(values):
@@ -111,7 +113,7 @@ class SoftDeleteMixin(object):
deleted_at = Column(DateTime) deleted_at = Column(DateTime)
deleted = Column(Integer, default=0) deleted = Column(Integer, default=0)
def soft_delete(self, session=None): def soft_delete(self, session):
"""Mark this object as deleted.""" """Mark this object as deleted."""
self.deleted = self.id self.deleted = self.id
self.deleted_at = timeutils.utcnow() self.deleted_at = timeutils.utcnow()
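
The iterator change is the standard six recipe: implement __next__() and keep a next() alias so rows iterate under both Python 2 and 3. A generic, self-contained sketch of the same bridge (the class is illustrative, not ModelBase itself):

import six


class PairIterator(six.Iterator):

    def __init__(self, keys):
        self._i = iter(keys)

    def __iter__(self):
        return self

    # In Python 3, __next__() has replaced next().
    def __next__(self):
        key = six.advance_iterator(self._i)
        return key, key.upper()

    def next(self):
        return self.__next__()


print(list(PairIterator(['id', 'name'])))  # [('id', 'ID'), ('name', 'NAME')]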


@@ -17,6 +17,7 @@
"""Provision test environment for specific DB backends""" """Provision test environment for specific DB backends"""
import argparse import argparse
import logging
import os import os
import random import random
import string import string
@@ -27,23 +28,12 @@ import sqlalchemy
from heat.openstack.common.db import exception as exc from heat.openstack.common.db import exception as exc
SQL_CONNECTION = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://') LOG = logging.getLogger(__name__)
def _gen_credentials(*names): def get_engine(uri):
"""Generate credentials."""
auth_dict = {}
for name in names:
val = ''.join(random.choice(string.ascii_lowercase)
for i in moves.range(10))
auth_dict[name] = val
return auth_dict
def _get_engine(uri=SQL_CONNECTION):
"""Engine creation """Engine creation
By default the uri is SQL_CONNECTION which is admin credentials.
Call the function without arguments to get admin connection. Admin Call the function without arguments to get admin connection. Admin
connection required to create temporary user and database for each connection required to create temporary user and database for each
particular test. Otherwise use existing connection to recreate connection particular test. Otherwise use existing connection to recreate connection
@@ -63,50 +53,43 @@ def _execute_sql(engine, sql, driver):
except sqlalchemy.exc.OperationalError: except sqlalchemy.exc.OperationalError:
msg = ('%s does not match database admin ' msg = ('%s does not match database admin '
'credentials or database does not exist.') 'credentials or database does not exist.')
raise exc.DBConnectionError(msg % SQL_CONNECTION) LOG.exception(msg % engine.url)
raise exc.DBConnectionError(msg % engine.url)
def create_database(engine): def create_database(engine):
"""Provide temporary user and database for each particular test.""" """Provide temporary user and database for each particular test."""
driver = engine.name driver = engine.name
auth = _gen_credentials('database', 'user', 'passwd') auth = {
'database': ''.join(random.choice(string.ascii_lowercase)
sqls = { for i in moves.range(10)),
'mysql': [ 'user': engine.url.username,
"drop database if exists %(database)s;", 'passwd': engine.url.password,
"grant all on %(database)s.* to '%(user)s'@'localhost'"
" identified by '%(passwd)s';",
"create database %(database)s;",
],
'postgresql': [
"drop database if exists %(database)s;",
"drop user if exists %(user)s;",
"create user %(user)s with password '%(passwd)s';",
"create database %(database)s owner %(user)s;",
]
} }
sqls = [
"drop database if exists %(database)s;",
"create database %(database)s;"
]
if driver == 'sqlite': if driver == 'sqlite':
return 'sqlite:////tmp/%s' % auth['database'] return 'sqlite:////tmp/%s' % auth['database']
elif driver in ['mysql', 'postgresql']:
try: sql_query = map(lambda x: x % auth, sqls)
sql_rows = sqls[driver] _execute_sql(engine, sql_query, driver)
except KeyError: else:
raise ValueError('Unsupported RDBMS %s' % driver) raise ValueError('Unsupported RDBMS %s' % driver)
sql_query = map(lambda x: x % auth, sql_rows)
_execute_sql(engine, sql_query, driver)
params = auth.copy() params = auth.copy()
params['backend'] = driver params['backend'] = driver
return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params
def drop_database(engine, current_uri): def drop_database(admin_engine, current_uri):
"""Drop temporary database and user after each particular test.""" """Drop temporary database and user after each particular test."""
engine = _get_engine(current_uri)
admin_engine = _get_engine() engine = get_engine(current_uri)
driver = engine.name driver = engine.name
auth = {'database': engine.url.database, 'user': engine.url.username} auth = {'database': engine.url.database, 'user': engine.url.username}
@@ -115,26 +98,11 @@ def drop_database(engine, current_uri):
os.remove(auth['database']) os.remove(auth['database'])
except OSError: except OSError:
pass pass
return elif driver in ['mysql', 'postgresql']:
sql = "drop database if exists %(database)s;"
sqls = { _execute_sql(admin_engine, [sql % auth], driver)
'mysql': [ else:
"drop database if exists %(database)s;",
"drop user '%(user)s'@'localhost';",
],
'postgresql': [
"drop database if exists %(database)s;",
"drop user if exists %(user)s;",
]
}
try:
sql_rows = sqls[driver]
except KeyError:
raise ValueError('Unsupported RDBMS %s' % driver) raise ValueError('Unsupported RDBMS %s' % driver)
sql_query = map(lambda x: x % auth, sql_rows)
_execute_sql(admin_engine, sql_query, driver)
def main(): def main():
@@ -173,7 +141,9 @@ def main():
args = parser.parse_args() args = parser.parse_args()
engine = _get_engine() connection_string = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION',
'sqlite://')
engine = get_engine(connection_string)
which = args.which which = args.which
if which == "create": if which == "create":


@@ -17,33 +17,24 @@
"""Session Handling for SQLAlchemy backend. """Session Handling for SQLAlchemy backend.
Initializing:
* Call set_defaults with the minimal of the following kwargs:
sql_connection, sqlite_db
Example::
session.set_defaults(
sql_connection="sqlite:///var/lib/heat/sqlite.db",
sqlite_db="/var/lib/heat/sqlite.db")
Recommended ways to use sessions within this framework: Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with AUTOCOMMIT=1. * Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
model_query() will implicitly use a session when called without one `model_query()` will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted. to be automatically retried if the database connection is interrupted.
Note: Automatic retry will be enabled in a future patch. .. note:: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at functionality should be handled at a logical level. For an example, look at
the code around quotas and reservation_rollback(). the code around quotas and `reservation_rollback()`.
Examples:: Examples:
.. code:: python
def get_foo(context, foo): def get_foo(context, foo):
return (model_query(context, models.Foo). return (model_query(context, models.Foo).
@@ -62,28 +53,29 @@ Recommended ways to use sessions within this framework:
return foo_ref return foo_ref
* Within the scope of a single method, keeping all the reads and writes within * Within the scope of a single method, keep all the reads and writes within
the context managed by a single session. In this way, the session's __exit__ the context managed by a single session. In this way, the session's
handler will take care of calling flush() and commit() for you. `__exit__` handler will take care of calling `flush()` and `commit()` for
If using this approach, you should not explicitly call flush() or commit(). you. If using this approach, you should not explicitly call `flush()` or
Any error within the context of the session will cause the session to emit `commit()`. Any error within the context of the session will cause the
a ROLLBACK. Database Errors like IntegrityError will be raised in session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
session's __exit__ handler, and any try/except within the context managed raised in `session`'s `__exit__` handler, and any try/except within the
by session will not be triggered. And catching other non-database errors in context managed by `session` will not be triggered. And catching other
the session will not trigger the ROLLBACK, so exception handlers should non-database errors in the session will not trigger the ROLLBACK, so
always be outside the session, unless the developer wants to do a partial exception handlers should always be outside the session, unless the
commit on purpose. If the connection is dropped before this is possible, developer wants to do a partial commit on purpose. If the connection is
the database will implicitly roll back the transaction. dropped before this is possible, the database will implicitly roll back the
transaction.
Note: statements in the session scope will not be automatically retried. .. note:: Statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you If you create models within the session, they need to be added, but you
do not need to call model.save() do not need to call `model.save()`:
:: .. code:: python
def create_many_foo(context, foos): def create_many_foo(context, foos):
session = get_session() session = sessionmaker()
with session.begin(): with session.begin():
for foo in foos: for foo in foos:
foo_ref = models.Foo() foo_ref = models.Foo()
@@ -91,7 +83,7 @@ Recommended ways to use sessions within this framework:
session.add(foo_ref) session.add(foo_ref)
def update_bar(context, foo_id, newbar): def update_bar(context, foo_id, newbar):
session = get_session() session = sessionmaker()
with session.begin(): with session.begin():
foo_ref = (model_query(context, models.Foo, session). foo_ref = (model_query(context, models.Foo, session).
filter_by(id=foo_id). filter_by(id=foo_id).
@@ -100,11 +92,16 @@ Recommended ways to use sessions within this framework:
filter_by(id=foo_ref['bar_id']). filter_by(id=foo_ref['bar_id']).
update({'bar': newbar})) update({'bar': newbar}))
Note: update_bar is a trivially simple example of using "with session.begin". .. note:: `update_bar` is a trivially simple example of using
Whereas create_many_foo is a good example of when a transaction is needed, ``with session.begin``. Whereas `create_many_foo` is a good example of
it is always best to use as few queries as possible. The two queries in when a transaction is needed, it is always best to use as few queries as
update_bar can be better expressed using a single query which avoids possible.
the need for an explicit transaction. It can be expressed like so::
The two queries in `update_bar` can be better expressed using a single query
which avoids the need for an explicit transaction. It can be expressed like
so:
.. code:: python
def update_bar(context, foo_id, newbar): def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id). subq = (model_query(context, models.Foo.id).
@@ -115,21 +112,25 @@ Recommended ways to use sessions within this framework:
filter_by(id=subq.as_scalar()). filter_by(id=subq.as_scalar()).
update({'bar': newbar})) update({'bar': newbar}))
For reference, this emits approximately the following SQL statement:: For reference, this emits approximately the following SQL statement:
.. code:: sql
UPDATE bar SET bar = ${newbar} UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1); WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
Note: create_duplicate_foo is a trivially simple example of catching an .. note:: `create_duplicate_foo` is a trivially simple example of catching an
exception while using "with session.begin". Here create two duplicate exception while using ``with session.begin``. Here create two duplicate
instances with same primary key, must catch the exception out of context instances with same primary key, must catch the exception out of context
managed by a single session: managed by a single session:
.. code:: python
def create_duplicate_foo(context): def create_duplicate_foo(context):
foo1 = models.Foo() foo1 = models.Foo()
foo2 = models.Foo() foo2 = models.Foo()
foo1.id = foo2.id = 1 foo1.id = foo2.id = 1
session = get_session() session = sessionmaker()
try: try:
with session.begin(): with session.begin():
session.add(foo1) session.add(foo1)
@@ -139,7 +140,7 @@ Recommended ways to use sessions within this framework:
* Passing an active session between methods. Sessions should only be passed * Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call session.begin() on an existing SQLAlchemy will throw an error when you call `session.begin()` on an existing
transaction. Public methods should not accept a session parameter and should transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope. not be involved in sessions within the caller's scope.
@@ -152,10 +153,10 @@ Recommended ways to use sessions within this framework:
becomes less clear in this situation. When this is needed for code clarity, becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented. it should be clearly documented.
:: .. code:: python
def myfunc(foo): def myfunc(foo):
session = get_session() session = sessionmaker()
with session.begin(): with session.begin():
# do some database things # do some database things
bar = _private_func(foo, session) bar = _private_func(foo, session)
@@ -163,7 +164,7 @@ Recommended ways to use sessions within this framework:
def _private_func(foo, session=None): def _private_func(foo, session=None):
if not session: if not session:
session = get_session() session = sessionmaker()
with session.begin(subtransaction=True): with session.begin(subtransaction=True):
# do some other database things # do some other database things
return bar return bar
@@ -173,13 +174,13 @@ There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary. * Don't keep a transaction open any longer than necessary.
This means that your "with session.begin()" block should be as short This means that your ``with session.begin()`` block should be as short
as possible, while still containing all the related calls for that as possible, while still containing all the related calls for that
transaction. transaction.
* Avoid "with_lockmode('UPDATE')" when possible. * Avoid ``with_lockmode('UPDATE')`` when possible.
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space. "gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap This can effectively prevent any INSERT into a table by locking the gap
@@ -190,15 +191,18 @@ There are some things which it is best to avoid:
number of rows matching a query, and if only one row is returned, number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE. then issue the SELECT FOR UPDATE.
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE. The better long-term solution is to use
``INSERT .. ON DUPLICATE KEY UPDATE``.
However, this can not be done until the "deleted" columns are removed and However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables. proper UNIQUE constraints are added to the tables.
Enabling soft deletes: Enabling soft deletes:
* To use/enable soft-deletes, the SoftDeleteMixin must be added * To use/enable soft-deletes, the `SoftDeleteMixin` must be added
to your model class. For example:: to your model class. For example:
.. code:: python
class NovaBase(models.SoftDeleteMixin, models.ModelBase): class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass pass
@@ -206,15 +210,16 @@ Enabling soft deletes:
Efficient use of soft deletes: Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:: * There are two possible ways to mark a record as deleted:
`model.soft_delete()` and `query.soft_delete()`.
model.soft_delete() and query.soft_delete(). The `model.soft_delete()` method works with a single already-fetched entry.
`query.soft_delete()` makes only one db request for all entries that
correspond to the query.
model.soft_delete() method works with single already fetched entry. * In almost all cases you should use `query.soft_delete()`. Some examples:
query.soft_delete() makes only one db request for all entries that correspond
to query.
* In almost all cases you should use query.soft_delete(). Some examples:: .. code:: python
def soft_delete_bar(): def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete() count = model_query(BarModel).find(some_condition).soft_delete()
@@ -223,7 +228,7 @@ Efficient use of soft deletes:
def complex_soft_delete_with_synchronization_bar(session=None): def complex_soft_delete_with_synchronization_bar(session=None):
if session is None: if session is None:
session = get_session() session = sessionmaker()
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
count = (model_query(BarModel). count = (model_query(BarModel).
find(some_condition). find(some_condition).
@@ -233,24 +238,26 @@ Efficient use of soft deletes:
if count == 0: if count == 0:
raise Exception("0 entries were soft deleted") raise Exception("0 entries were soft deleted")
* There is only one situation where model.soft_delete() is appropriate: when * There is only one situation where `model.soft_delete()` is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same you fetch a single record, work with it, and mark it as deleted in the same
transaction. transaction.
:: .. code:: python
def soft_delete_bar_model(): def soft_delete_bar_model():
session = get_session() session = sessionmaker()
with session.begin(): with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first() bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref # Work with bar_ref
bar_ref.soft_delete(session=session) bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to query and However, if you need to work with all entries that correspond to query and
then soft delete them you should use query.soft_delete() method:: then soft delete them you should use the `query.soft_delete()` method:
.. code:: python
def soft_delete_multi_models(): def soft_delete_multi_models():
session = get_session() session = sessionmaker()
with session.begin(): with session.begin():
query = (model_query(BarModel, session=session). query = (model_query(BarModel, session=session).
find(some_condition)) find(some_condition))
@@ -261,22 +268,22 @@ Efficient use of soft deletes:
# session and these entries are not used after this. # session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete, When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using model.soft_delete(), as in the following which issues a single query. Using `model.soft_delete()`, as in the following
example, is very inefficient. example, is very inefficient.
:: .. code:: python
for bar_ref in bar_refs: for bar_ref in bar_refs:
bar_ref.soft_delete(session=session) bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests. # This will produce count(bar_refs) db requests.
""" """
import functools import functools
import os.path import logging
import re import re
import time import time
from oslo.config import cfg
import six import six
from sqlalchemy import exc as sqla_exc from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener from sqlalchemy.interfaces import PoolListener
@@ -285,151 +292,12 @@ from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql.expression import literal_column
from heat.openstack.common.db import exception from heat.openstack.common.db import exception
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _LE, _LW
from heat.openstack.common import log as logging
from heat.openstack.common import timeutils from heat.openstack.common import timeutils
sqlite_db_opts = [
cfg.StrOpt('sqlite_db',
default='heat.sqlite',
help='the filename to use with sqlite'),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help='If true, use synchronous mode for sqlite'),
]
database_opts = [
cfg.StrOpt('connection',
default='sqlite:///' +
os.path.abspath(os.path.join(os.path.dirname(__file__),
'../', '$sqlite_db')),
help='The SQLAlchemy connection string used to connect to the '
'database',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('slave_connection',
default='',
secret=True,
help='The SQLAlchemy connection string used to connect to the '
'slave database'),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
]
CONF = cfg.CONF
CONF.register_opts(sqlite_db_opts)
CONF.register_opts(database_opts, 'database')
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
_ENGINE = None
_MAKER = None
_SLAVE_ENGINE = None
_SLAVE_MAKER = None
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection)
cfg.set_defaults(sqlite_db_opts,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def cleanup():
global _ENGINE, _MAKER
global _SLAVE_ENGINE, _SLAVE_MAKER
if _MAKER:
_MAKER.close_all()
_MAKER = None
if _ENGINE:
_ENGINE.dispose()
_ENGINE = None
if _SLAVE_MAKER:
_SLAVE_MAKER.close_all()
_SLAVE_MAKER = None
if _SLAVE_ENGINE:
_SLAVE_ENGINE.dispose()
_SLAVE_ENGINE = None
class SqliteForeignKeysListener(PoolListener): class SqliteForeignKeysListener(PoolListener):
"""Ensures that the foreign key constraints are enforced in SQLite. """Ensures that the foreign key constraints are enforced in SQLite.
@@ -442,30 +310,6 @@ class SqliteForeignKeysListener(PoolListener):
dbapi_con.execute('pragma foreign_keys=ON') dbapi_con.execute('pragma foreign_keys=ON')
def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
slave_session=False, mysql_traditional_mode=False):
"""Return a SQLAlchemy session."""
global _MAKER
global _SLAVE_MAKER
maker = _MAKER
if slave_session:
maker = _SLAVE_MAKER
if maker is None:
engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session,
mysql_traditional_mode=mysql_traditional_mode)
maker = get_maker(engine, autocommit, expire_on_commit)
if slave_session:
_SLAVE_MAKER = maker
else:
_MAKER = maker
session = maker()
return session
# note(boris-42): In current versions of DB backends unique constraint # note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure: # violation messages follow the structure:
# #
@ -474,9 +318,9 @@ def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
# N columns - (IntegrityError) column c1, c2, ..., N are not unique # N columns - (IntegrityError) column c1, c2, ..., N are not unique
# #
# sqlite since 3.7.16: # sqlite since 3.7.16:
# 1 column - (IntegrityError) UNIQUE constraint failed: k1 # 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
# #
# N columns - (IntegrityError) UNIQUE constraint failed: k1, k2 # N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
# #
# postgres: # postgres:
# 1 column - (IntegrityError) duplicate key value violates unique # 1 column - (IntegrityError) duplicate key value violates unique
@ -489,11 +333,20 @@ def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
# 'c1'") # 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined # N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'") # with -' for key 'name_of_our_constraint'")
#
# ibm_db_sa:
# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
# statement, UPDATE statement, or foreign key update caused by a
# DELETE statement are not valid because the primary key, unique
# constraint or unique index identified by "2" constrains table
# "NOVA.KEY_PAIRS" from having duplicate values for the index
# key.
_DUP_KEY_RE_DB = { _DUP_KEY_RE_DB = {
"sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"), "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")), re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
"postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),), "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
"mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),) "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
"ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
} }
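# A quick, illustrative check of the patterns above; the error string is
# a made-up example following the documented sqlite >= 3.7.16 format:
import re

_sqlite_new = re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")
_msg = "(IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2"
_columns = _sqlite_new.match(_msg).group(1)
# strip the "tbl." prefix, as _raise_if_duplicate_entry_error() does below
print([c.split('.')[-1] for c in _columns.strip().split(", ")])  # ['k1', 'k2']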
@ -515,7 +368,7 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
return [columns] return [columns]
return columns[len(uniqbase):].split("0")[1:] return columns[len(uniqbase):].split("0")[1:]
if engine_name not in ["mysql", "sqlite", "postgresql"]: if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
return return
# FIXME(johannes): The usage of the .message attribute has been # FIXME(johannes): The usage of the .message attribute has been
@ -530,10 +383,15 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
else: else:
return return
columns = match.group(1) # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
# columns so we have to omit that from the DBDuplicateEntry error.
columns = ''
if engine_name != 'ibm_db_sa':
columns = match.group(1)
if engine_name == "sqlite": if engine_name == "sqlite":
columns = columns.strip().split(", ") columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
else: else:
columns = get_columns_from_uniq_cons_or_name(columns) columns = get_columns_from_uniq_cons_or_name(columns)
raise exception.DBDuplicateEntry(columns, integrity_error) raise exception.DBDuplicateEntry(columns, integrity_error)
@ -572,56 +430,39 @@ def _raise_if_deadlock_error(operational_error, engine_name):
def _wrap_db_error(f): def _wrap_db_error(f):
@functools.wraps(f) @functools.wraps(f)
def _wrap(*args, **kwargs): def _wrap(self, *args, **kwargs):
try: try:
return f(*args, **kwargs) assert issubclass(
self.__class__, sqlalchemy.orm.session.Session
), ('_wrap_db_error() can only be applied to methods of '
'subclasses of sqlalchemy.orm.session.Session.')
return f(self, *args, **kwargs)
except UnicodeEncodeError: except UnicodeEncodeError:
raise exception.DBInvalidUnicodeParameter() raise exception.DBInvalidUnicodeParameter()
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
except sqla_exc.OperationalError as e: except sqla_exc.OperationalError as e:
_raise_if_deadlock_error(e, get_engine().name) _raise_if_db_connection_lost(e, self.bind)
_raise_if_deadlock_error(e, self.bind.dialect.name)
# NOTE(comstud): A lot of code is checking for OperationalError # NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now. # so let's not wrap it for now.
raise raise
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
except sqla_exc.IntegrityError as e: except sqla_exc.IntegrityError as e:
# note(boris-42): SqlAlchemy doesn't unify errors from different # note(boris-42): SqlAlchemy doesn't unify errors from different
# DBs so we must do this. Also in some tables (for example # DBs so we must do this. Also in some tables (for example
# instance_types) there are more than one unique constraint. This # instance_types) there are more than one unique constraint. This
# means we should get names of columns, which values violate # means we should get names of columns, which values violate
# unique constraint, from error message. # unique constraint, from error message.
_raise_if_duplicate_entry_error(e, get_engine().name) _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
raise exception.DBError(e) raise exception.DBError(e)
except Exception as e: except Exception as e:
LOG.exception(_('DB exception wrapped.')) LOG.exception(_LE('DB exception wrapped.'))
raise exception.DBError(e) raise exception.DBError(e)
return _wrap return _wrap
def get_engine(sqlite_fk=False, slave_engine=False,
mysql_traditional_mode=False):
"""Return a SQLAlchemy engine."""
global _ENGINE
global _SLAVE_ENGINE
engine = _ENGINE
db_uri = CONF.database.connection
if slave_engine:
engine = _SLAVE_ENGINE
db_uri = CONF.database.slave_connection
if engine is None:
engine = create_engine(db_uri, sqlite_fk=sqlite_fk,
mysql_traditional_mode=mysql_traditional_mode)
if slave_engine:
_SLAVE_ENGINE = engine
else:
_ENGINE = engine
return engine
def _synchronous_switch_listener(dbapi_conn, connection_rec): def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode.""" """Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF") dbapi_conn.execute("PRAGMA synchronous = OFF")
@ -663,22 +504,78 @@ def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
cursor.execute(ping_sql) cursor.execute(ping_sql)
except Exception as ex: except Exception as ex:
if engine.dialect.is_disconnect(ex, dbapi_conn, cursor): if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
msg = _('Database server has gone away: %s') % ex msg = _LW('Database server has gone away: %s') % ex
LOG.warning(msg) LOG.warning(msg)
# if the database server has gone away, all connections in the pool
# have become invalid and we can safely close all of them here,
# rather than waste time on checking of every single connection
engine.dispose()
# this will be handled by SQLAlchemy and will force it to create
# a new connection and retry the original action
raise sqla_exc.DisconnectionError(msg) raise sqla_exc.DisconnectionError(msg)
else: else:
raise raise
def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy): def _set_session_sql_mode(dbapi_con, connection_rec, sql_mode=None):
"""Set engine mode to 'traditional'. """Set the sql_mode session variable.
Required to prevent silent truncation on insert or update operations MySQL supports several server modes. The default is None, but sessions
under MySQL. By default MySQL truncates an inserted string that is may choose to enable server modes like TRADITIONAL, ANSI,
longer than the declared field, emitting only a warning. That risks several STRICT_* modes and others.
data corruption.
Note: passing in '' (empty string) for sql_mode clears
the SQL mode for the session, overriding a potentially set
server default.
""" """
dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;")
cursor = dbapi_con.cursor()
cursor.execute("SET SESSION sql_mode = %s", [sql_mode])
def _mysql_get_effective_sql_mode(engine):
"""Returns the effective SQL mode for connections from the engine pool.
Returns ``None`` if the mode isn't available, otherwise returns the mode.
"""
# Get the real effective SQL mode. Even when unset by
# our own config, the server may still be operating in a specific
# SQL mode as set by the server configuration.
# Also note that the checkout listener will be called on execute to
# set the mode if it's registered.
row = engine.execute("SHOW VARIABLES LIKE 'sql_mode'").fetchone()
if row is None:
return
return row[1]
def _mysql_check_effective_sql_mode(engine):
"""Logs a message based on the effective SQL mode for MySQL connections."""
realmode = _mysql_get_effective_sql_mode(engine)
if realmode is None:
LOG.warning(_LW('Unable to detect effective SQL mode'))
return
LOG.debug('MySQL server mode set to %s', realmode)
# 'TRADITIONAL' mode enables several other modes, so
# we need a substring match here
if not ('TRADITIONAL' in realmode.upper() or
'STRICT_ALL_TABLES' in realmode.upper()):
LOG.warning(_LW("MySQL SQL mode is '%s', "
"consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
realmode)
def _mysql_set_mode_callback(engine, sql_mode):
if sql_mode is not None:
mode_callback = functools.partial(_set_session_sql_mode,
sql_mode=sql_mode)
sqlalchemy.event.listen(engine, 'connect', mode_callback)
_mysql_check_effective_sql_mode(engine)
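# A minimal sketch of how the helpers above fit together; it assumes a
# reachable MySQL server behind the made-up URL (on other backends the
# callback is simply never registered, see create_engine() below):
import sqlalchemy

engine = sqlalchemy.create_engine('mysql://user:secret@localhost/test')
_mysql_set_mode_callback(engine, 'TRADITIONAL')
# every new DBAPI connection now runs SET SESSION sql_mode = 'TRADITIONAL',
# and a warning is logged if the effective mode is neither TRADITIONAL
# nor STRICT_ALL_TABLES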
def _is_db_connection_error(args): def _is_db_connection_error(args):
@ -693,66 +590,75 @@ def _is_db_connection_error(args):
return False return False
def create_engine(sql_connection, sqlite_fk=False, def _raise_if_db_connection_lost(error, engine):
mysql_traditional_mode=False): # NOTE(vsergeyev): Function is_disconnect(e, connection, cursor)
# requires a connection and cursor as incoming parameters,
# but we have no way to create a connection if the DB
# is not available, so in that case reconnecting fails.
# But is_disconnect() ignores these parameters, so it
# makes sense to pass None to the function as placeholders
# for the connection and cursor.
if engine.dialect.is_disconnect(error, None, None):
raise exception.DBConnectionError(error)
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
idle_timeout=3600,
connection_debug=0, max_pool_size=None, max_overflow=None,
pool_timeout=None, sqlite_synchronous=True,
connection_trace=False, max_retries=10, retry_interval=10):
"""Return a new SQLAlchemy engine.""" """Return a new SQLAlchemy engine."""
# NOTE(geekinutah): At this point we could be connecting to the normal
# db handle or the slave db handle. Things like
# _wrap_db_error aren't going to work well if their
# backends don't match. Let's check.
_assert_matching_drivers()
connection_dict = sqlalchemy.engine.url.make_url(sql_connection) connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = { engine_args = {
"pool_recycle": CONF.database.idle_timeout, "pool_recycle": idle_timeout,
"echo": False,
'convert_unicode': True, 'convert_unicode': True,
} }
# Map our SQL debug level to SQLAlchemy's options logger = logging.getLogger('sqlalchemy.engine')
if CONF.database.connection_debug >= 100:
engine_args['echo'] = 'debug' # Map SQL debug level to Python log level
elif CONF.database.connection_debug >= 50: if connection_debug >= 100:
engine_args['echo'] = True logger.setLevel(logging.DEBUG)
elif connection_debug >= 50:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)
if "sqlite" in connection_dict.drivername: if "sqlite" in connection_dict.drivername:
if sqlite_fk: if sqlite_fk:
engine_args["listeners"] = [SqliteForeignKeysListener()] engine_args["listeners"] = [SqliteForeignKeysListener()]
engine_args["poolclass"] = NullPool engine_args["poolclass"] = NullPool
if CONF.database.connection == "sqlite://": if sql_connection == "sqlite://":
engine_args["poolclass"] = StaticPool engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False} engine_args["connect_args"] = {'check_same_thread': False}
else: else:
if CONF.database.max_pool_size is not None: if max_pool_size is not None:
engine_args['pool_size'] = CONF.database.max_pool_size engine_args['pool_size'] = max_pool_size
if CONF.database.max_overflow is not None: if max_overflow is not None:
engine_args['max_overflow'] = CONF.database.max_overflow engine_args['max_overflow'] = max_overflow
if CONF.database.pool_timeout is not None: if pool_timeout is not None:
engine_args['pool_timeout'] = CONF.database.pool_timeout engine_args['pool_timeout'] = pool_timeout
engine = sqlalchemy.create_engine(sql_connection, **engine_args) engine = sqlalchemy.create_engine(sql_connection, **engine_args)
sqlalchemy.event.listen(engine, 'checkin', _thread_yield) sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
if engine.name in ['mysql', 'ibm_db_sa']: if engine.name in ['mysql', 'ibm_db_sa']:
callback = functools.partial(_ping_listener, engine) ping_callback = functools.partial(_ping_listener, engine)
sqlalchemy.event.listen(engine, 'checkout', callback) sqlalchemy.event.listen(engine, 'checkout', ping_callback)
if mysql_traditional_mode: if engine.name == 'mysql':
sqlalchemy.event.listen(engine, 'checkout', _set_mode_traditional) if mysql_sql_mode:
else: _mysql_set_mode_callback(engine, mysql_sql_mode)
LOG.warning(_("This application has not enabled MySQL traditional"
" mode, which means silent data corruption may"
" occur. Please encourage the application"
" developers to enable this mode."))
elif 'sqlite' in connection_dict.drivername: elif 'sqlite' in connection_dict.drivername:
if not CONF.sqlite_synchronous: if not sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect', sqlalchemy.event.listen(engine, 'connect',
_synchronous_switch_listener) _synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener) sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
if (CONF.database.connection_trace and if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
engine.dialect.dbapi.__name__ == 'MySQLdb'):
_patch_mysqldb_with_stacktrace_comments() _patch_mysqldb_with_stacktrace_comments()
try: try:
@ -761,15 +667,15 @@ def create_engine(sql_connection, sqlite_fk=False,
if not _is_db_connection_error(e.args[0]): if not _is_db_connection_error(e.args[0]):
raise raise
remaining = CONF.database.max_retries remaining = max_retries
if remaining == -1: if remaining == -1:
remaining = 'infinite' remaining = 'infinite'
while True: while True:
msg = _('SQL connection failed. %s attempts left.') msg = _LW('SQL connection failed. %s attempts left.')
LOG.warning(msg % remaining) LOG.warning(msg % remaining)
if remaining != 'infinite': if remaining != 'infinite':
remaining -= 1 remaining -= 1
time.sleep(CONF.database.retry_interval) time.sleep(retry_interval)
try: try:
engine.connect() engine.connect()
break break
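# Sketch of the new calling convention: configuration now arrives as
# explicit keyword arguments instead of being read from a global CONF
# (values shown are the defaults; the sqlite URL keeps it self-contained):
engine = create_engine('sqlite://',
                       mysql_sql_mode='TRADITIONAL',
                       idle_timeout=3600,
                       max_retries=10,
                       retry_interval=10)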
@ -856,13 +762,144 @@ def _patch_mysqldb_with_stacktrace_comments():
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query) setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
def _assert_matching_drivers(): class EngineFacade(object):
"""Make sure slave handle and normal handle have the same driver.""" """A helper class for removing of global engine instances from heat.db.
# NOTE(geekinutah): There's no use case for writing to one backend and
# reading from another. Who knows what the future holds?
if CONF.database.slave_connection == '':
return
normal = sqlalchemy.engine.url.make_url(CONF.database.connection) As a library, heat.db can't decide where to store/when to create engine
slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection) and sessionmaker instances, so this must be left for a target application.
assert normal.drivername == slave.drivername
On the other hand, in order to simplify the adoption of heat.db changes,
we'll provide a helper class, which creates engine and sessionmaker
on its instantiation and provides get_engine()/get_session() methods
that are compatible with corresponding utility functions that currently
exist in target projects, e.g. in Nova.
engine/sessionmaker instances will still be global (and they are meant to
be global), but they will be stored in the app context, rather than in the
heat.db context.
Note: use of this helper is completely optional and you are encouraged to
integrate engine/sessionmaker instances into your apps any way you like
(e.g. one might want to bind a session to a request context). Two important
things to remember:
1. An Engine instance is effectively a pool of DB connections, so it's
meant to be shared (and it's thread-safe).
2. A Session instance is not meant to be shared and represents a DB
transactional context (i.e. it's not thread-safe). sessionmaker is
a factory of sessions.
"""
def __init__(self, sql_connection,
sqlite_fk=False, autocommit=True,
expire_on_commit=False, **kwargs):
"""Initialize engine and sessionmaker instances.
:param sqlite_fk: enable foreign keys in SQLite
:type sqlite_fk: bool
:param autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:param expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
Keyword arguments:
:keyword mysql_sql_mode: the SQL mode to be used for MySQL sessions.
(defaults to TRADITIONAL)
:keyword idle_timeout: timeout before idle sql connections are reaped
(defaults to 3600)
:keyword connection_debug: verbosity of SQL debugging information.
0=None, 100=Everything (defaults to 0)
:keyword max_pool_size: maximum number of SQL connections to keep open
in a pool (defaults to SQLAlchemy settings)
:keyword max_overflow: if set, use this value for max_overflow with
sqlalchemy (defaults to SQLAlchemy settings)
:keyword pool_timeout: if set, use this value for pool_timeout with
sqlalchemy (defaults to SQLAlchemy settings)
:keyword sqlite_synchronous: if True, SQLite uses synchronous mode
(defaults to True)
:keyword connection_trace: add python stack traces to SQL as comment
strings (defaults to False)
:keyword max_retries: maximum db connection retries during startup.
(setting -1 implies an infinite retry count)
(defaults to 10)
:keyword retry_interval: interval between retries of opening a sql
connection (defaults to 10)
"""
super(EngineFacade, self).__init__()
self._engine = create_engine(
sql_connection=sql_connection,
sqlite_fk=sqlite_fk,
mysql_sql_mode=kwargs.get('mysql_sql_mode', 'TRADITIONAL'),
idle_timeout=kwargs.get('idle_timeout', 3600),
connection_debug=kwargs.get('connection_debug', 0),
max_pool_size=kwargs.get('max_pool_size'),
max_overflow=kwargs.get('max_overflow'),
pool_timeout=kwargs.get('pool_timeout'),
sqlite_synchronous=kwargs.get('sqlite_synchronous', True),
connection_trace=kwargs.get('connection_trace', False),
max_retries=kwargs.get('max_retries', 10),
retry_interval=kwargs.get('retry_interval', 10))
self._session_maker = get_maker(
engine=self._engine,
autocommit=autocommit,
expire_on_commit=expire_on_commit)
def get_engine(self):
"""Get the engine instance (note, that it's shared)."""
return self._engine
def get_session(self, **kwargs):
"""Get a Session instance.
If passed, keyword argument values override the ones used when the
sessionmaker instance was created.
:keyword autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:keyword expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
"""
# iterate over a copy of the keys: deleting from a dict while
# iterating over it raises a RuntimeError
for arg in list(kwargs):
    if arg not in ('autocommit', 'expire_on_commit'):
        del kwargs[arg]
return self._session_maker(**kwargs)
@classmethod
def from_config(cls, connection_string, conf,
sqlite_fk=False, autocommit=True, expire_on_commit=False):
"""Initialize EngineFacade using oslo.config config instance options.
:param connection_string: SQLAlchemy connection string
:type connection_string: string
:param conf: oslo.config config instance
:type conf: oslo.config.cfg.ConfigOpts
:param sqlite_fk: enable foreign keys in SQLite
:type sqlite_fk: bool
:param autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:param expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
"""
return cls(sql_connection=connection_string,
sqlite_fk=sqlite_fk,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
**dict(conf.database.items()))
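# A minimal usage sketch; the in-memory SQLite URL keeps the example
# self-contained, while a real application would pass
# conf.database.connection (e.g. via EngineFacade.from_config()):
facade = EngineFacade('sqlite://')
engine = facade.get_engine()    # shared, thread-safe engine
session = facade.get_session()  # fresh Session, not to be shared
print(session.execute('SELECT 1').scalar())  # 1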
@ -0,0 +1,153 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import functools
import os
import fixtures
import six
from heat.openstack.common.db.sqlalchemy import session
from heat.openstack.common.db.sqlalchemy import utils
from heat.openstack.common.fixture import lockutils
from heat.openstack.common import test
class DbFixture(fixtures.Fixture):
"""Basic database fixture.
Allows running tests on various db backends, such as SQLite, MySQL and
PostgreSQL. By default the sqlite backend is used. To override the
default backend uri, set the env variable OS_TEST_DBAPI_CONNECTION to a
uri with database admin credentials for the specific backend.
"""
def _get_uri(self):
return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://')
def __init__(self, test):
super(DbFixture, self).__init__()
self.test = test
def setUp(self):
super(DbFixture, self).setUp()
self.test.engine = session.create_engine(self._get_uri())
self.test.sessionmaker = session.get_maker(self.test.engine)
self.addCleanup(self.test.engine.dispose)
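# Sketch of pointing the fixture at another backend; the URI below is an
# illustrative set of admin credentials, not a real deployment:
import os

os.environ['OS_TEST_DBAPI_CONNECTION'] = (
    'mysql://openstack_citest:openstack_citest@localhost/openstack_citest')
# any DbTestCase run after this will build its engine from that URI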
class DbTestCase(test.BaseTestCase):
"""Base class for testing of DB code.
Uses `DbFixture`. Intended to be the main database test case for running
all the tests on a given backend with a user-defined uri. Backend-specific
tests should be decorated with the `backend_specific` decorator.
"""
FIXTURE = DbFixture
def setUp(self):
super(DbTestCase, self).setUp()
self.useFixture(self.FIXTURE(self))
ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']
def backend_specific(*dialects):
"""Decorator to skip backend specific tests on inappropriate engines.
::dialects: list of dialects names under which the test will be launched.
"""
def wrap(f):
@functools.wraps(f)
def ins_wrap(self):
if not set(dialects).issubset(ALLOWED_DIALECTS):
raise ValueError(
"Please use allowed dialects: %s" % ALLOWED_DIALECTS)
if self.engine.name not in dialects:
msg = ('The test "%s" can be run '
'only on %s. Current engine is %s.')
args = (f.__name__, ' '.join(dialects), self.engine.name)
self.skip(msg % args)
else:
return f(self)
return ins_wrap
return wrap
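# Illustrative use of the decorator; the test class and body are
# hypothetical:
class ExampleDbTestCase(DbTestCase):

    @backend_specific('mysql', 'postgresql')
    def test_server_side_feature(self):
        # skipped unless the fixture connected to MySQL or PostgreSQL
        self.assertIn(self.engine.name, ('mysql', 'postgresql'))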
@six.add_metaclass(abc.ABCMeta)
class OpportunisticFixture(DbFixture):
"""Base fixture to use default CI databases.
The databases exist in the OpenStack CI infrastructure. For correct
functioning in a local environment, the databases must be created
manually.
"""
DRIVER = abc.abstractproperty(lambda: None)
DBNAME = PASSWORD = USERNAME = 'openstack_citest'
def _get_uri(self):
return utils.get_connect_string(backend=self.DRIVER,
user=self.USERNAME,
passwd=self.PASSWORD,
database=self.DBNAME)
@six.add_metaclass(abc.ABCMeta)
class OpportunisticTestCase(DbTestCase):
"""Base test case to use default CI databases.
Subclasses of this test case run only when the openstack_citest
database is available; otherwise the tests will be skipped.
"""
FIXTURE = abc.abstractproperty(lambda: None)
def setUp(self):
# TODO(bnemec): Remove this once infra is ready for
# https://review.openstack.org/#/c/74963/ to merge.
self.useFixture(lockutils.LockFixture('opportunistic-db'))
credentials = {
'backend': self.FIXTURE.DRIVER,
'user': self.FIXTURE.USERNAME,
'passwd': self.FIXTURE.PASSWORD,
'database': self.FIXTURE.DBNAME}
if self.FIXTURE.DRIVER and not utils.is_backend_avail(**credentials):
msg = '%s backend is not available.' % self.FIXTURE.DRIVER
return self.skip(msg)
super(OpportunisticTestCase, self).setUp()
class MySQLOpportunisticFixture(OpportunisticFixture):
DRIVER = 'mysql'
class PostgreSQLOpportunisticFixture(OpportunisticFixture):
DRIVER = 'postgresql'
class MySQLOpportunisticTestCase(OpportunisticTestCase):
FIXTURE = MySQLOpportunisticFixture
class PostgreSQLOpportunisticTestCase(OpportunisticTestCase):
FIXTURE = PostgreSQLOpportunisticFixture
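# Sketch of an opportunistic test; it is skipped automatically wherever
# the CI-style 'openstack_citest' MySQL database is not reachable:
class ExampleMySQLTestCase(MySQLOpportunisticTestCase):

    def test_backend(self):
        self.assertEqual('mysql', self.engine.name)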
@ -16,83 +16,43 @@
# under the License. # under the License.
import functools import functools
import logging
import os import os
import subprocess import subprocess
import lockfile import lockfile
from six import moves from six import moves
from six.moves.urllib import parse
import sqlalchemy import sqlalchemy
import sqlalchemy.exc import sqlalchemy.exc
from heat.openstack.common.gettextutils import _ from heat.openstack.common.db.sqlalchemy import utils
from heat.openstack.common import log as logging from heat.openstack.common.gettextutils import _LE
from heat.openstack.common.py3kcompat import urlutils
from heat.openstack.common import test from heat.openstack.common import test
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
def _get_connect_string(backend, user, passwd, database):
"""Get database connection
Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
if backend == "postgres":
backend = "postgresql+psycopg2"
elif backend == "mysql":
backend = "mysql+mysqldb"
else:
raise Exception("Unrecognized backend: '%s'" % backend)
return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
% {'backend': backend, 'user': user, 'passwd': passwd,
'database': database})
def _is_backend_avail(backend, user, passwd, database):
try:
connect_uri = _get_connect_string(backend, user, passwd, database)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
# have any backend code loaded.
return False
else:
connection.close()
engine.dispose()
return True
def _have_mysql(user, passwd, database): def _have_mysql(user, passwd, database):
present = os.environ.get('TEST_MYSQL_PRESENT') present = os.environ.get('TEST_MYSQL_PRESENT')
if present is None: if present is None:
return _is_backend_avail('mysql', user, passwd, database) return utils.is_backend_avail(backend='mysql',
user=user,
passwd=passwd,
database=database)
return present.lower() in ('', 'true') return present.lower() in ('', 'true')
def _have_postgresql(user, passwd, database): def _have_postgresql(user, passwd, database):
present = os.environ.get('TEST_POSTGRESQL_PRESENT') present = os.environ.get('TEST_POSTGRESQL_PRESENT')
if present is None: if present is None:
return _is_backend_avail('postgres', user, passwd, database) return utils.is_backend_avail(backend='postgres',
user=user,
passwd=passwd,
database=database)
return present.lower() in ('', 'true') return present.lower() in ('', 'true')
def get_db_connection_info(conn_pieces):
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
password = auth_pieces[1].strip()
return (user, password, database, host)
def _set_db_lock(lock_path=None, lock_prefix=None): def _set_db_lock(lock_path=None, lock_prefix=None):
def decorator(f): def decorator(f):
@functools.wraps(f) @functools.wraps(f)
@ -101,10 +61,10 @@ def _set_db_lock(lock_path=None, lock_prefix=None):
path = lock_path or os.environ.get("HEAT_LOCK_PATH") path = lock_path or os.environ.get("HEAT_LOCK_PATH")
lock = lockfile.FileLock(os.path.join(path, lock_prefix)) lock = lockfile.FileLock(os.path.join(path, lock_prefix))
with lock: with lock:
LOG.debug(_('Got lock "%s"') % f.__name__) LOG.debug('Got lock "%s"' % f.__name__)
return f(*args, **kwargs) return f(*args, **kwargs)
finally: finally:
LOG.debug(_('Lock released "%s"') % f.__name__) LOG.debug('Lock released "%s"' % f.__name__)
return wrapper return wrapper
return decorator return decorator
@ -167,7 +127,10 @@ class BaseMigrationTestCase(test.BaseTestCase):
"Failed to run: %s\n%s" % (cmd, output)) "Failed to run: %s\n%s" % (cmd, output))
def _reset_pg(self, conn_pieces): def _reset_pg(self, conn_pieces):
(user, password, database, host) = get_db_connection_info(conn_pieces) (user,
password,
database,
host) = utils.get_db_connection_info(conn_pieces)
os.environ['PGPASSWORD'] = password os.environ['PGPASSWORD'] = password
os.environ['PGUSER'] = user os.environ['PGUSER'] = user
# note(boris-42): We must create and drop database, we can't # note(boris-42): We must create and drop database, we can't
@ -191,7 +154,7 @@ class BaseMigrationTestCase(test.BaseTestCase):
def _reset_databases(self): def _reset_databases(self):
for key, engine in self.engines.items(): for key, engine in self.engines.items():
conn_string = self.test_databases[key] conn_string = self.test_databases[key]
conn_pieces = urlutils.urlparse(conn_string) conn_pieces = parse.urlparse(conn_string)
engine.dispose() engine.dispose()
if conn_string.startswith('sqlite'): if conn_string.startswith('sqlite'):
# We can just delete the SQLite database, which is # We can just delete the SQLite database, which is
@ -206,7 +169,7 @@ class BaseMigrationTestCase(test.BaseTestCase):
# the MYSQL database, which is easier and less error-prone # the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me. # than using SQLAlchemy to do this via MetaData...trust me.
(user, password, database, host) = \ (user, password, database, host) = \
get_db_connection_info(conn_pieces) utils.get_db_connection_info(conn_pieces)
sql = ("drop database if exists %(db)s; " sql = ("drop database if exists %(db)s; "
"create database %(db)s;") % {'db': database} "create database %(db)s;") % {'db': database}
cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s " cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s "
@ -302,6 +265,6 @@ class WalkVersionsMixin(object):
if check: if check:
check(engine, data) check(engine, data)
except Exception: except Exception:
LOG.error("Failed to migrate to version %s on engine %s" % LOG.error(_LE("Failed to migrate to version %s on engine %s") %
(version, engine)) (version, engine))
raise raise
@ -17,9 +17,9 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import logging
import re import re
from migrate.changeset import UniqueConstraint
import sqlalchemy import sqlalchemy
from sqlalchemy import Boolean from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint from sqlalchemy import CheckConstraint
@ -30,16 +30,16 @@ from sqlalchemy import func
from sqlalchemy import Index from sqlalchemy import Index
from sqlalchemy import Integer from sqlalchemy import Integer
from sqlalchemy import MetaData from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String from sqlalchemy import String
from sqlalchemy import Table from sqlalchemy import Table
from sqlalchemy.types import NullType from sqlalchemy.types import NullType
from heat.openstack.common.gettextutils import _ from heat.openstack.common import context as request_context
from heat.openstack.common.db.sqlalchemy import models
from heat.openstack.common import log as logging from heat.openstack.common.gettextutils import _, _LI, _LW
from heat.openstack.common import timeutils from heat.openstack.common import timeutils
@ -95,7 +95,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
if 'id' not in sort_keys: if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check # TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id # the actual primary key, rather than assuming its id
LOG.warning(_('Id not in sort_keys; is sort_keys unique?')) LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs)) assert(not (sort_dir and sort_dirs))
@ -158,6 +158,98 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
return query return query
def _read_deleted_filter(query, db_model, read_deleted):
if 'deleted' not in db_model.__table__.columns:
raise ValueError(_("There is no `deleted` column in `%s` table. "
                   "Project doesn't use the soft-delete feature.")
                 % db_model.__name__)
default_deleted_value = db_model.__table__.c.deleted.default.arg
if read_deleted == 'no':
query = query.filter(db_model.deleted == default_deleted_value)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter(db_model.deleted != default_deleted_value)
else:
raise ValueError(_("Unrecognized read_deleted value '%s'")
% read_deleted)
return query
def _project_filter(query, db_model, context, project_only):
if project_only and 'project_id' not in db_model.__table__.columns:
raise ValueError(_("There is no `project_id` column in `%s` table.")
% db_model.__name__)
if request_context.is_user_context(context) and project_only:
if project_only == 'allow_none':
is_none = None
query = query.filter(or_(db_model.project_id == context.project_id,
db_model.project_id == is_none))
else:
query = query.filter(db_model.project_id == context.project_id)
return query
def model_query(context, model, session, args=None, project_only=False,
read_deleted=None):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param model: Model to query. Must be a subclass of ModelBase.
:type model: models.ModelBase
:param session: The session to use.
:type session: sqlalchemy.orm.session.Session
:param args: Arguments to query. If None - model is used.
:type args: tuple
:param project_only: If present and context is user-type, then restrict
query to match the context's project_id. If set to
'allow_none', restriction includes project_id = None.
:type project_only: bool
:param read_deleted: If present, overrides context's read_deleted field.
:type read_deleted: bool
Usage:
.. code:: python
result = (utils.model_query(context, models.Instance, session=session)
.filter_by(uuid=instance_uuid)
.all())
query = utils.model_query(
context, Node,
session=session,
args=(func.count(Node.id), func.sum(Node.ram))
).filter_by(project_id=project_id)
"""
if not read_deleted:
if hasattr(context, 'read_deleted'):
# NOTE(viktors): some projects use `read_deleted` attribute in
# their contexts instead of `show_deleted`.
read_deleted = context.read_deleted
else:
read_deleted = context.show_deleted
if not issubclass(model, models.ModelBase):
raise TypeError(_("model should be a subclass of ModelBase"))
query = session.query(model) if not args else session.query(*args)
query = _read_deleted_filter(query, model, read_deleted)
query = _project_filter(query, model, context, project_only)
return query
def get_table(engine, name): def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db. """Returns an sqlalchemy table dynamically from db.
@ -209,6 +301,10 @@ def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
**col_name_col_instance): **col_name_col_instance):
"""Drop unique constraint from table. """Drop unique constraint from table.
DEPRECATED: this function is deprecated and will be removed from heat.db
in a few releases. Please use UniqueConstraint.drop() method directly for
sqlalchemy-migrate migration scripts.
This method drops UC from table and works for mysql, postgresql and sqlite. This method drops UC from table and works for mysql, postgresql and sqlite.
In mysql and postgresql we are able to use "alter table" construction. In mysql and postgresql we are able to use "alter table" construction.
Sqlalchemy doesn't support some sqlite column types and replaces their Sqlalchemy doesn't support some sqlite column types and replaces their
@ -225,6 +321,8 @@ def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
types by sqlite. For example BigInteger. types by sqlite. For example BigInteger.
""" """
from migrate.changeset import UniqueConstraint
meta = MetaData() meta = MetaData()
meta.bind = migrate_engine meta.bind = migrate_engine
t = Table(table_name, meta, autoload=True) t = Table(table_name, meta, autoload=True)
@ -264,9 +362,9 @@ def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
columns_for_select = [func.max(table.c.id)] columns_for_select = [func.max(table.c.id)]
columns_for_select.extend(columns_for_group_by) columns_for_select.extend(columns_for_group_by)
duplicated_rows_select = select(columns_for_select, duplicated_rows_select = sqlalchemy.sql.select(
group_by=columns_for_group_by, columns_for_select, group_by=columns_for_group_by,
having=func.count(table.c.id) > 1) having=func.count(table.c.id) > 1)
for row in migrate_engine.execute(duplicated_rows_select): for row in migrate_engine.execute(duplicated_rows_select):
# NOTE(boris-42): Do not remove row that has the biggest ID. # NOTE(boris-42): Do not remove row that has the biggest ID.
@ -276,10 +374,11 @@ def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
for name in uc_column_names: for name in uc_column_names:
delete_condition &= table.c[name] == row[name] delete_condition &= table.c[name] == row[name]
rows_to_delete_select = select([table.c.id]).where(delete_condition) rows_to_delete_select = sqlalchemy.sql.select(
[table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall(): for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_("Deleting duplicated row with id: %(id)s from table: " LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name)) "%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete: if use_soft_delete:
delete_statement = table.update().\ delete_statement = table.update().\
@ -387,7 +486,7 @@ def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
else: else:
c_select.append(table.c.deleted == table.c.id) c_select.append(table.c.deleted == table.c.id)
ins = InsertFromSelect(new_table, select(c_select)) ins = InsertFromSelect(new_table, sqlalchemy.sql.select(c_select))
migrate_engine.execute(ins) migrate_engine.execute(ins)
table.drop() table.drop()
@ -498,3 +597,52 @@ def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
where(new_table.c.deleted == deleted).\ where(new_table.c.deleted == deleted).\
values(deleted=default_deleted_value).\ values(deleted=default_deleted_value).\
execute() execute()
def get_connect_string(backend, database, user=None, passwd=None):
"""Get database connection
Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
args = {'backend': backend,
'user': user,
'passwd': passwd,
'database': database}
if backend == 'sqlite':
template = '%(backend)s:///%(database)s'
else:
template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
return template % args
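# Examples of the URIs produced (credentials are placeholders):
print(get_connect_string('mysql', 'testdb', user='u', passwd='p'))
# mysql://u:p@localhost/testdb
print(get_connect_string('sqlite', '/tmp/test.db'))
# sqlite:////tmp/test.db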
def is_backend_avail(backend, database, user=None, passwd=None):
try:
connect_uri = get_connect_string(backend=backend,
database=database,
user=user,
passwd=passwd)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
# have any backend code loaded.
return False
else:
connection.close()
engine.dispose()
return True
def get_db_connection_info(conn_pieces):
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
password = auth_pieces[1].strip()
return (user, password, database, host)
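# Round-trip sketch; the URL is made up:
from six.moves.urllib import parse

pieces = parse.urlparse('mysql://heat:secret@dbhost/heatdb')
print(get_db_connection_info(pieces))
# ('heat', 'secret', 'heatdb', 'dbhost')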
@ -24,6 +24,7 @@ Usual usage in an openstack.common module:
""" """
import copy import copy
import functools
import gettext import gettext
import locale import locale
from logging import handlers from logging import handlers
@ -36,6 +37,17 @@ import six
_localedir = os.environ.get('heat'.upper() + '_LOCALEDIR') _localedir = os.environ.get('heat'.upper() + '_LOCALEDIR')
_t = gettext.translation('heat', localedir=_localedir, fallback=True) _t = gettext.translation('heat', localedir=_localedir, fallback=True)
# We use separate translation catalogs for each log level, so set up a
# mapping between the log level name and the translator. The domain
# for the log level is project_name + "-log-" + log_level so messages
# for each level end up in their own catalog.
_t_log_levels = dict(
(level, gettext.translation('heat' + '-log-' + level,
localedir=_localedir,
fallback=True))
for level in ['info', 'warning', 'error', 'critical']
)
_AVAILABLE_LANGUAGES = {} _AVAILABLE_LANGUAGES = {}
USE_LAZY = False USE_LAZY = False
@ -61,6 +73,28 @@ def _(msg):
return _t.ugettext(msg) return _t.ugettext(msg)
def _log_translation(msg, level):
"""Build a single translation of a log message
"""
if USE_LAZY:
return Message(msg, domain='heat' + '-log-' + level)
else:
translator = _t_log_levels[level]
if six.PY3:
return translator.gettext(msg)
return translator.ugettext(msg)
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = functools.partial(_log_translation, level='info')
_LW = functools.partial(_log_translation, level='warning')
_LE = functools.partial(_log_translation, level='error')
_LC = functools.partial(_log_translation, level='critical')
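# Intended call pattern for the level-specific markers; the logger and
# message arguments are illustrative:
import logging

LOG = logging.getLogger(__name__)
LOG.warning(_LW('Database server has gone away: %s'), 'timeout')
LOG.error(_LE('Failed to migrate to version %s'), 42)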
def install(domain, lazy=False): def install(domain, lazy=False):
"""Install a _() function using the given translation domain. """Install a _() function using the given translation domain.
@ -17,7 +17,6 @@ import string
import sys import sys
import uuid import uuid
from oslo.config import cfg
import sqlalchemy import sqlalchemy
from heat.common import context from heat.common import context
@ -26,9 +25,9 @@ from heat.db import api as db_api
from heat.engine import environment from heat.engine import environment
from heat.engine import parser from heat.engine import parser
from heat.engine import resource from heat.engine import resource
from heat.openstack.common.db.sqlalchemy import session from heat.openstack.common.db import options
get_engine = session.get_engine get_engine = db_api.get_engine
class UUIDStub(object): class UUIDStub(object):
@ -114,10 +113,10 @@ def wr_delete_after(test_fn):
def setup_dummy_db(): def setup_dummy_db():
cfg.CONF.set_default('sqlite_synchronous', False) options.cfg.set_defaults(options.database_opts, sqlite_synchronous=False)
session.set_defaults(sql_connection="sqlite://", sqlite_db='heat.db') options.set_defaults(sql_connection="sqlite://", sqlite_db='heat.db')
db_api.db_sync()
engine = get_engine() engine = get_engine()
db_api.db_sync(engine)
engine.connect() engine.connect()