Sync the latest DB code from oslo-incubator

This sync contains the following commits from oslo-incubator:

7959826 db: move all options into database group
dda24eb Introduce mysql_sql_mode option, remove old warning
0b5af67 Introduce a method to set any MySQL session SQL mode
8dccc7b Handle ibm_db_sa DBDuplicateEntry integrity errors
0f24d82 Fix migration.db_version when no tables
ac84a40 Update log translation domains
c0d357b Add model_query() to db.sqlalchemy.utils module
84254fc Fix a small typo in api.py
b8a676c Remove CONF.database.connection default value
86707cd Remove None for dict.get()
0545121 Fix duplicating of SQL queries in logs
fcf517d Update oslo log messages with translation domains
fa05b7c Restore the ability to load the DB backend lazily
630d395 Don't use cfg.CONF in oslo.db
ce69e7f Don't store engine instances in oslo.db
35dc1d7 py3kcompat: remove
b4f72b2 Don't raise MySQL 2013 'Lost connection' errors
271adfb Format sql in db.sqlalchemy.session docstring
0334cb3 Handle exception messages with six.text_type
eff69ce Drop dependency on log from oslo db code
7a11a04 Automatic retry db.api query if db connection lost
11f2add Clean up docstring in db.sqlalchemy.session
1b5147f Only enable MySQL TRADITIONAL mode if we're running against MySQL
39e1c5c Move db tests base.py to common code
986dafd Fix parsing of UC errors in sqlite 3.7.16+/3.8.2+
bcf6d5e Small edits on help strings
ae01e9a Transition from migrate to alembic

Due to API changes in oslo.db code we have to change Nova code a bit
in order to reuse oslo-incubator changes (oslo.db no longer stores
SQLAlchemy Engine and sessionmaker instances globally and it's up to
applications to create them).

Change-Id: I4aaa7151f66e0292ff66c29330f93d78c6bf78a6
This commit is contained in:
Roman Podoliaka 2014-02-24 18:17:02 +02:00 committed by Sean Dague
parent 5418039348
commit 605749ca12
21 changed files with 850 additions and 459 deletions

View File

@ -1543,17 +1543,6 @@
#s3_listen_port=3333 #s3_listen_port=3333
#
# Options defined in nova.openstack.common.db.sqlalchemy.session
#
# The file name to use with SQLite (string value)
#sqlite_db=nova.sqlite
# If True, SQLite uses synchronous mode (boolean value)
#sqlite_synchronous=true
# #
# Options defined in nova.openstack.common.eventlet_backdoor # Options defined in nova.openstack.common.eventlet_backdoor
# #
@ -2107,7 +2096,7 @@
# The SQLAlchemy connection string used to connect to the # The SQLAlchemy connection string used to connect to the
# bare-metal database (string value) # bare-metal database (string value)
#sql_connection=sqlite:///$state_path/baremetal_$sqlite_db #sql_connection=sqlite:///$state_path/baremetal_nova.sqlite
# #
@ -2428,33 +2417,39 @@
[database] [database]
# #
# Options defined in nova.openstack.common.db.api # Options defined in nova.db.sqlalchemy.api
# #
# The SQLAlchemy connection string used to connect to the
# slave database (string value)
#slave_connection=<None>
#
# Options defined in nova.openstack.common.db.options
#
# The file name to use with SQLite (string value)
#sqlite_db=nova.sqlite
# If True, SQLite uses synchronous mode (boolean value)
#sqlite_synchronous=true
# The backend to use for db (string value) # The backend to use for db (string value)
# Deprecated group/name - [DEFAULT]/db_backend # Deprecated group/name - [DEFAULT]/db_backend
#backend=sqlalchemy #backend=sqlalchemy
# Enable the experimental use of thread pooling for all DB API
# calls (boolean value)
# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
#use_tpool=false
#
# Options defined in nova.openstack.common.db.sqlalchemy.session
#
# The SQLAlchemy connection string used to connect to the # The SQLAlchemy connection string used to connect to the
# database (string value) # database (string value)
# Deprecated group/name - [DEFAULT]/sql_connection # Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection # Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection # Deprecated group/name - [sql]/connection
#connection=sqlite:////nova/openstack/common/db/$sqlite_db #connection=<None>
# The SQLAlchemy connection string used to connect to the # The SQL mode to be used for MySQL sessions (default is
# slave database (string value) # empty, meaning do not override any server-side SQL mode
#slave_connection= # setting) (string value)
#mysql_sql_mode=<None>
# Timeout before idle sql connections are reaped (integer # Timeout before idle sql connections are reaped (integer
# value) # value)
@ -2508,6 +2503,25 @@
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout # Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
#pool_timeout=<None> #pool_timeout=<None>
# Enable the experimental use of database reconnect on
# connection lost (boolean value)
#use_db_reconnect=false
# seconds between db connection retries (integer value)
#db_retry_interval=1
# Whether to increase interval between db connection retries,
# up to db_max_retry_interval (boolean value)
#db_inc_retry_interval=true
# max seconds between db connection retries, if
# db_inc_retry_interval is enabled (integer value)
#db_max_retry_interval=10
# maximum db connection retries before error is raised.
# (setting -1 implies an infinite retry count) (integer value)
#db_max_retries=20
[docker] [docker]

View File

@ -17,17 +17,17 @@
from oslo.config import cfg from oslo.config import cfg
from nova.openstack.common.db.sqlalchemy import session as db_session from nova.openstack.common.db import options
from nova import paths from nova import paths
from nova import rpc from nova import rpc
from nova import version from nova import version
_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('$sqlite_db') _DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('nova.sqlite')
def parse_args(argv, default_config_files=None): def parse_args(argv, default_config_files=None):
db_session.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION, options.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
sqlite_db='nova.sqlite') sqlite_db='nova.sqlite')
rpc.set_defaults(control_exchange='nova') rpc.set_defaults(control_exchange='nova')
cfg.CONF(argv[1:], cfg.CONF(argv[1:],
project='nova', project='nova',

View File

@ -49,11 +49,14 @@ db_opts = [
CONF = cfg.CONF CONF = cfg.CONF
CONF.register_opts(db_opts) CONF.register_opts(db_opts)
CONF.import_opt('backend', 'nova.openstack.common.db.options',
group='database')
_BACKEND_MAPPING = {'sqlalchemy': 'nova.db.sqlalchemy.api'} _BACKEND_MAPPING = {'sqlalchemy': 'nova.db.sqlalchemy.api'}
IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING) IMPL = db_api.DBAPI(CONF.database.backend, backend_mapping=_BACKEND_MAPPING,
lazy=True)
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
# The maximum value a signed INT type may have # The maximum value a signed INT type may have

View File

@ -70,21 +70,57 @@ db_opts = [
'Should be empty, "project" or "global".'), 'Should be empty, "project" or "global".'),
] ]
connection_opts = [
cfg.StrOpt('slave_connection',
secret=True,
help='The SQLAlchemy connection string used to connect to the '
'slave database'),
]
CONF = cfg.CONF CONF = cfg.CONF
CONF.register_opts(db_opts) CONF.register_opts(db_opts)
CONF.register_opts(connection_opts, group='database')
CONF.import_opt('compute_topic', 'nova.compute.rpcapi') CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('connection', CONF.import_opt('connection',
'nova.openstack.common.db.sqlalchemy.session', 'nova.openstack.common.db.options',
group='database') group='database')
CONF.import_opt('slave_connection',
'nova.openstack.common.db.sqlalchemy.session',
group='database')
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
get_engine = db_session.get_engine
get_session = db_session.get_session _MASTER_FACADE = None
_SLAVE_FACADE = None
def _create_facade_lazily(use_slave=False):
global _MASTER_FACADE
global _SLAVE_FACADE
return_slave = use_slave and CONF.database.slave_connection
if not return_slave:
if _MASTER_FACADE is None:
_MASTER_FACADE = db_session.EngineFacade(
CONF.database.connection,
**dict(CONF.database.iteritems())
)
return _MASTER_FACADE
else:
if _SLAVE_FACADE is None:
_SLAVE_FACADE = db_session.EngineFacade(
CONF.database.slave_connection,
**dict(CONF.database.iteritems())
)
return _SLAVE_FACADE
def get_engine(use_slave=False):
facade = _create_facade_lazily(use_slave)
return facade.get_engine()
def get_session(use_slave=False, **kwargs):
facade = _create_facade_lazily(use_slave)
return facade.get_session(**kwargs)
_SHADOW_TABLE_PREFIX = 'shadow_' _SHADOW_TABLE_PREFIX = 'shadow_'
@ -195,7 +231,7 @@ def model_query(context, model, *args, **kwargs):
if CONF.database.slave_connection == '': if CONF.database.slave_connection == '':
use_slave = False use_slave = False
session = kwargs.get('session') or get_session(slave_session=use_slave) session = kwargs.get('session') or get_session(use_slave=use_slave)
read_deleted = kwargs.get('read_deleted') or context.read_deleted read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only', False) project_only = kwargs.get('project_only', False)
@ -1818,7 +1854,7 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
if CONF.database.slave_connection == '': if CONF.database.slave_connection == '':
use_slave = False use_slave = False
session = get_session(slave_session=use_slave) session = get_session(use_slave=use_slave)
if columns_to_join is None: if columns_to_join is None:
columns_to_join = ['info_cache', 'security_groups'] columns_to_join = ['info_cache', 'security_groups']

View File

@ -21,8 +21,8 @@ from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository from migrate.versioning.repository import Repository
import sqlalchemy import sqlalchemy
from nova.db.sqlalchemy import api as db_session
from nova import exception from nova import exception
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common.gettextutils import _ from nova.openstack.common.gettextutils import _
INIT_VERSION = 215 INIT_VERSION = 215

View File

@ -43,6 +43,14 @@ class NovaBase(models.SoftDeleteMixin,
models.ModelBase): models.ModelBase):
metadata = None metadata = None
def save(self, session=None):
from nova.db.sqlalchemy import api
if session is None:
session = api.get_session()
super(NovaBase, self).save(session=session)
class Service(BASE, NovaBase): class Service(BASE, NovaBase):
"""Represents a running service on a host.""" """Represents a running service on a host."""

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Rackspace Hosting # Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved. # All Rights Reserved.
# #
@ -17,90 +15,148 @@
"""Multiple DB API backend support. """Multiple DB API backend support.
Supported configuration options:
The following two parameters are in the 'database' group:
`backend`: DB backend name or full module path to DB backend module.
`use_tpool`: Enable thread pooling of DB API calls.
A DB backend module should implement a method named 'get_backend' which A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB takes no arguments. The method can return any object that implements DB
API methods. API methods.
*NOTE*: There are bugs in eventlet when using tpool combined with
threading locks. The python logging module happens to use such locks. To
work around this issue, be sure to specify thread=False with
eventlet.monkey_patch().
A bug for eventlet has been filed here:
https://bitbucket.org/eventlet/eventlet/issue/137/
""" """
import functools import functools
import logging
import threading
import time
from oslo.config import cfg from nova.openstack.common.db import exception
from nova.openstack.common.gettextutils import _LE
from nova.openstack.common import importutils from nova.openstack.common import importutils
from nova.openstack.common import lockutils
db_opts = [ LOG = logging.getLogger(__name__)
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The backend to use for db'),
cfg.BoolOpt('use_tpool',
default=False,
deprecated_name='dbapi_use_tpool',
deprecated_group='DEFAULT',
help='Enable the experimental use of thread pooling for '
'all DB API calls')
]
CONF = cfg.CONF
CONF.register_opts(db_opts, 'database') def safe_for_db_retry(f):
"""Enable db-retry for decorated function, if config option enabled."""
f.__dict__['enable_retry'] = True
return f
class wrap_db_retry(object):
"""Retry db.api methods, if DBConnectionError() raised
Retry decorated db.api methods. If we enabled `use_db_reconnect`
in config, this decorator will be applied to all db.api functions,
marked with @safe_for_db_retry decorator.
Decorator catchs DBConnectionError() and retries function in a
loop until it succeeds, or until maximum retries count will be reached.
"""
def __init__(self, retry_interval, max_retries, inc_retry_interval,
max_retry_interval):
super(wrap_db_retry, self).__init__()
self.retry_interval = retry_interval
self.max_retries = max_retries
self.inc_retry_interval = inc_retry_interval
self.max_retry_interval = max_retry_interval
def __call__(self, f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
next_interval = self.retry_interval
remaining = self.max_retries
while True:
try:
return f(*args, **kwargs)
except exception.DBConnectionError as e:
if remaining == 0:
LOG.exception(_LE('DB exceeded retry limit.'))
raise exception.DBError(e)
if remaining != -1:
remaining -= 1
LOG.exception(_LE('DB connection error.'))
# NOTE(vsergeyev): We are using patched time module, so
# this effectively yields the execution
# context to another green thread.
time.sleep(next_interval)
if self.inc_retry_interval:
next_interval = min(
next_interval * 2,
self.max_retry_interval
)
return wrapper
class DBAPI(object): class DBAPI(object):
def __init__(self, backend_mapping=None): def __init__(self, backend_name, backend_mapping=None, lazy=False,
if backend_mapping is None: **kwargs):
backend_mapping = {} """Initialize the chosen DB API backend.
self.__backend = None
self.__backend_mapping = backend_mapping :param backend_name: name of the backend to load
:type backend_name: str
:param backend_mapping: backend name -> module/class to load mapping
:type backend_mapping: dict
:param lazy: load the DB backend lazily on the first DB API method call
:type lazy: bool
Keyword arguments:
:keyword use_db_reconnect: retry DB transactions on disconnect or not
:type use_db_reconnect: bool
:keyword retry_interval: seconds between transaction retries
:type retry_interval: int
:keyword inc_retry_interval: increase retry interval or not
:type inc_retry_interval: bool
:keyword max_retry_interval: max interval value between retries
:type max_retry_interval: int
:keyword max_retries: max number of retries before an error is raised
:type max_retries: int
@lockutils.synchronized('dbapi_backend', 'nova-')
def __get_backend(self):
"""Get the actual backend. May be a module or an instance of
a class. Doesn't matter to us. We do this synchronized as it's
possible multiple greenthreads started very quickly trying to do
DB calls and eventlet can switch threads before self.__backend gets
assigned.
""" """
if self.__backend:
# Another thread assigned it self._backend = None
return self.__backend self._backend_name = backend_name
backend_name = CONF.database.backend self._backend_mapping = backend_mapping or {}
self.__use_tpool = CONF.database.use_tpool self._lock = threading.Lock()
if self.__use_tpool:
from eventlet import tpool if not lazy:
self.__tpool = tpool self._load_backend()
# Import the untranslated name if we don't have a
# mapping. self.use_db_reconnect = kwargs.get('use_db_reconnect', False)
backend_path = self.__backend_mapping.get(backend_name, self.retry_interval = kwargs.get('retry_interval', 1)
backend_name) self.inc_retry_interval = kwargs.get('inc_retry_interval', True)
backend_mod = importutils.import_module(backend_path) self.max_retry_interval = kwargs.get('max_retry_interval', 10)
self.__backend = backend_mod.get_backend() self.max_retries = kwargs.get('max_retries', 20)
return self.__backend
def _load_backend(self):
with self._lock:
if not self._backend:
# Import the untranslated name if we don't have a mapping
backend_path = self._backend_mapping.get(self._backend_name,
self._backend_name)
backend_mod = importutils.import_module(backend_path)
self._backend = backend_mod.get_backend()
def __getattr__(self, key): def __getattr__(self, key):
backend = self.__backend or self.__get_backend() if not self._backend:
attr = getattr(backend, key) self._load_backend()
if not self.__use_tpool or not hasattr(attr, '__call__'):
attr = getattr(self._backend, key)
if not hasattr(attr, '__call__'):
return attr return attr
# NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
# DB API methods, decorated with @safe_for_db_retry
# on disconnect.
if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
attr = wrap_db_retry(
retry_interval=self.retry_interval,
max_retries=self.max_retries,
inc_retry_interval=self.inc_retry_interval,
max_retry_interval=self.max_retry_interval)(attr)
def tpool_wrapper(*args, **kwargs): return attr
return self.__tpool.execute(attr, *args, **kwargs)
functools.update_wrapper(tpool_wrapper, attr)
return tpool_wrapper

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. # Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved. # All Rights Reserved.
@ -18,14 +16,16 @@
"""DB related custom exceptions.""" """DB related custom exceptions."""
from nova.openstack.common.gettextutils import _ # noqa import six
from nova.openstack.common.gettextutils import _
class DBError(Exception): class DBError(Exception):
"""Wraps an implementation specific exception.""" """Wraps an implementation specific exception."""
def __init__(self, inner_exception=None): def __init__(self, inner_exception=None):
self.inner_exception = inner_exception self.inner_exception = inner_exception
super(DBError, self).__init__(str(inner_exception)) super(DBError, self).__init__(six.text_type(inner_exception))
class DBDuplicateEntry(DBError): class DBDuplicateEntry(DBError):
@ -43,3 +43,14 @@ class DBDeadlock(DBError):
class DBInvalidUnicodeParameter(Exception): class DBInvalidUnicodeParameter(Exception):
message = _("Invalid Parameter: " message = _("Invalid Parameter: "
"Unicode is not supported by the current database.") "Unicode is not supported by the current database.")
class DbMigrationError(DBError):
"""Wraps migration specific exception."""
def __init__(self, message=None):
super(DbMigrationError, self).__init__(message)
class DBConnectionError(DBError):
"""Wraps connection specific exception."""
pass

View File

@ -0,0 +1,168 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo.config import cfg
database_opts = [
cfg.StrOpt('sqlite_db',
deprecated_group='DEFAULT',
default='nova.sqlite',
help='The file name to use with SQLite'),
cfg.BoolOpt('sqlite_synchronous',
deprecated_group='DEFAULT',
default=True,
help='If True, SQLite uses synchronous mode'),
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The backend to use for db'),
cfg.StrOpt('connection',
help='The SQLAlchemy connection string used to connect to the '
'database',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('mysql_sql_mode',
help='The SQL mode to be used for MySQL sessions '
'(default is empty, meaning do not override '
'any server-side SQL mode setting)'),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='Timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='Maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='Interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
cfg.BoolOpt('use_db_reconnect',
default=False,
help='Enable the experimental use of database reconnect '
'on connection lost'),
cfg.IntOpt('db_retry_interval',
default=1,
help='seconds between db connection retries'),
cfg.BoolOpt('db_inc_retry_interval',
default=True,
help='Whether to increase interval between db connection '
'retries, up to db_max_retry_interval'),
cfg.IntOpt('db_max_retry_interval',
default=10,
help='max seconds between db connection retries, if '
'db_inc_retry_interval is enabled'),
cfg.IntOpt('db_max_retries',
default=20,
help='maximum db connection retries before error is raised. '
'(setting -1 implies an infinite retry count)'),
]
CONF = cfg.CONF
CONF.register_opts(database_opts, 'database')
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def list_opts():
"""Returns a list of oslo.config options available in the library.
The returned list includes all oslo.config options which may be registered
at runtime by the library.
Each element of the list is a tuple. The first element is the name of the
group under which the list of elements in the second element will be
registered. A group name of None corresponds to the [DEFAULT] group in
config files.
The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users by this library.
:returns: a list of (group_name, opts) tuples
"""
return [('database', copy.deepcopy(database_opts))]

View File

@ -51,13 +51,9 @@ import sqlalchemy
from sqlalchemy.schema import UniqueConstraint from sqlalchemy.schema import UniqueConstraint
from nova.openstack.common.db import exception from nova.openstack.common.db import exception
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common.gettextutils import _ from nova.openstack.common.gettextutils import _
get_engine = db_session.get_engine
def _get_unique_constraints(self, table): def _get_unique_constraints(self, table):
"""Retrieve information about existing unique constraints of the table """Retrieve information about existing unique constraints of the table
@ -172,11 +168,12 @@ def patch_migrate():
sqlite.SQLiteConstraintGenerator) sqlite.SQLiteConstraintGenerator)
def db_sync(abs_path, version=None, init_version=0): def db_sync(engine, abs_path, version=None, init_version=0):
"""Upgrade or downgrade a database. """Upgrade or downgrade a database.
Function runs the upgrade() or downgrade() functions in change scripts. Function runs the upgrade() or downgrade() functions in change scripts.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository. :param abs_path: Absolute path to migrate repository.
:param version: Database will upgrade/downgrade until this version. :param version: Database will upgrade/downgrade until this version.
If None - database will update to the latest If None - database will update to the latest
@ -190,18 +187,23 @@ def db_sync(abs_path, version=None, init_version=0):
raise exception.DbMigrationError( raise exception.DbMigrationError(
message=_("version should be an integer")) message=_("version should be an integer"))
current_version = db_version(abs_path, init_version) current_version = db_version(engine, abs_path, init_version)
repository = _find_migrate_repo(abs_path) repository = _find_migrate_repo(abs_path)
_db_schema_sanity_check() _db_schema_sanity_check(engine)
if version is None or version > current_version: if version is None or version > current_version:
return versioning_api.upgrade(get_engine(), repository, version) return versioning_api.upgrade(engine, repository, version)
else: else:
return versioning_api.downgrade(get_engine(), repository, return versioning_api.downgrade(engine, repository,
version) version)
def _db_schema_sanity_check(): def _db_schema_sanity_check(engine):
engine = get_engine() """Ensure all database tables were created with required parameters.
:param engine: SQLAlchemy engine instance for a given database
"""
if engine.name == 'mysql': if engine.name == 'mysql':
onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION ' onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
'from information_schema.TABLES ' 'from information_schema.TABLES '
@ -216,23 +218,23 @@ def _db_schema_sanity_check():
) % ','.join(table_names)) ) % ','.join(table_names))
def db_version(abs_path, init_version): def db_version(engine, abs_path, init_version):
"""Show the current version of the repository. """Show the current version of the repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository :param abs_path: Absolute path to migrate repository
:param version: Initial database version :param version: Initial database version
""" """
repository = _find_migrate_repo(abs_path) repository = _find_migrate_repo(abs_path)
try: try:
return versioning_api.db_version(get_engine(), repository) return versioning_api.db_version(engine, repository)
except versioning_exceptions.DatabaseNotControlledError: except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData() meta = sqlalchemy.MetaData()
engine = get_engine()
meta.reflect(bind=engine) meta.reflect(bind=engine)
tables = meta.tables tables = meta.tables
if len(tables) == 0 or 'alembic_version' in tables: if len(tables) == 0 or 'alembic_version' in tables:
db_version_control(abs_path, init_version) db_version_control(engine, abs_path, version=init_version)
return versioning_api.db_version(get_engine(), repository) return versioning_api.db_version(engine, repository)
else: else:
raise exception.DbMigrationError( raise exception.DbMigrationError(
message=_( message=_(
@ -241,17 +243,18 @@ def db_version(abs_path, init_version):
"manually.")) "manually."))
def db_version_control(abs_path, version=None): def db_version_control(engine, abs_path, version=None):
"""Mark a database as under this repository's version control. """Mark a database as under this repository's version control.
Once a database is under version control, schema changes should Once a database is under version control, schema changes should
only be done via change scripts in this repository. only be done via change scripts in this repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository :param abs_path: Absolute path to migrate repository
:param version: Initial database version :param version: Initial database version
""" """
repository = _find_migrate_repo(abs_path) repository = _find_migrate_repo(abs_path)
versioning_api.version_control(get_engine(), repository, version) versioning_api.version_control(engine, repository, version)
return version return version

View File

@ -26,7 +26,6 @@ from sqlalchemy import Column, Integer
from sqlalchemy import DateTime from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper from sqlalchemy.orm import object_mapper
from nova.openstack.common.db.sqlalchemy import session as sa
from nova.openstack.common import timeutils from nova.openstack.common import timeutils
@ -34,10 +33,9 @@ class ModelBase(object):
"""Base class for models.""" """Base class for models."""
__table_initialized__ = False __table_initialized__ = False
def save(self, session=None): def save(self, session):
"""Save this object.""" """Save this object."""
if not session:
session = sa.get_session()
# NOTE(boris-42): This part of code should be look like: # NOTE(boris-42): This part of code should be look like:
# session.add(self) # session.add(self)
# session.flush() # session.flush()
@ -110,7 +108,7 @@ class SoftDeleteMixin(object):
deleted_at = Column(DateTime) deleted_at = Column(DateTime)
deleted = Column(Integer, default=0) deleted = Column(Integer, default=0)
def soft_delete(self, session=None): def soft_delete(self, session):
"""Mark this object as deleted.""" """Mark this object as deleted."""
self.deleted = self.id self.deleted = self.id
self.deleted_at = timeutils.utcnow() self.deleted_at = timeutils.utcnow()

View File

@ -16,19 +16,6 @@
"""Session Handling for SQLAlchemy backend. """Session Handling for SQLAlchemy backend.
Initializing:
* Call `set_defaults()` with the minimal of the following kwargs:
``sql_connection``, ``sqlite_db``
Example:
.. code:: python
session.set_defaults(
sql_connection="sqlite:///var/lib/nova/sqlite.db",
sqlite_db="/var/lib/nova/sqlite.db")
Recommended ways to use sessions within this framework: Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``. * Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
@ -87,7 +74,7 @@ Recommended ways to use sessions within this framework:
.. code:: python .. code:: python
def create_many_foo(context, foos): def create_many_foo(context, foos):
session = get_session() session = sessionmaker()
with session.begin(): with session.begin():
for foo in foos: for foo in foos:
foo_ref = models.Foo() foo_ref = models.Foo()
@ -95,7 +82,7 @@ Recommended ways to use sessions within this framework:
session.add(foo_ref) session.add(foo_ref)
def update_bar(context, foo_id, newbar): def update_bar(context, foo_id, newbar):
session = get_session() session = sessionmaker()
with session.begin(): with session.begin():
foo_ref = (model_query(context, models.Foo, session). foo_ref = (model_query(context, models.Foo, session).
filter_by(id=foo_id). filter_by(id=foo_id).
@ -124,7 +111,9 @@ Recommended ways to use sessions within this framework:
filter_by(id=subq.as_scalar()). filter_by(id=subq.as_scalar()).
update({'bar': newbar})) update({'bar': newbar}))
For reference, this emits approximately the following SQL statement:: For reference, this emits approximately the following SQL statement:
.. code:: sql
UPDATE bar SET bar = ${newbar} UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1); WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
@ -140,7 +129,7 @@ Recommended ways to use sessions within this framework:
foo1 = models.Foo() foo1 = models.Foo()
foo2 = models.Foo() foo2 = models.Foo()
foo1.id = foo2.id = 1 foo1.id = foo2.id = 1
session = get_session() session = sessionmaker()
try: try:
with session.begin(): with session.begin():
session.add(foo1) session.add(foo1)
@ -166,7 +155,7 @@ Recommended ways to use sessions within this framework:
.. code:: python .. code:: python
def myfunc(foo): def myfunc(foo):
session = get_session() session = sessionmaker()
with session.begin(): with session.begin():
# do some database things # do some database things
bar = _private_func(foo, session) bar = _private_func(foo, session)
@ -174,7 +163,7 @@ Recommended ways to use sessions within this framework:
def _private_func(foo, session=None): def _private_func(foo, session=None):
if not session: if not session:
session = get_session() session = sessionmaker()
with session.begin(subtransaction=True): with session.begin(subtransaction=True):
# do some other database things # do some other database things
return bar return bar
@ -238,7 +227,7 @@ Efficient use of soft deletes:
def complex_soft_delete_with_synchronization_bar(session=None): def complex_soft_delete_with_synchronization_bar(session=None):
if session is None: if session is None:
session = get_session() session = sessionmaker()
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
count = (model_query(BarModel). count = (model_query(BarModel).
find(some_condition). find(some_condition).
@ -255,7 +244,7 @@ Efficient use of soft deletes:
.. code:: python .. code:: python
def soft_delete_bar_model(): def soft_delete_bar_model():
session = get_session() session = sessionmaker()
with session.begin(): with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first() bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref # Work with bar_ref
@ -267,7 +256,7 @@ Efficient use of soft deletes:
.. code:: python .. code:: python
def soft_delete_multi_models(): def soft_delete_multi_models():
session = get_session() session = sessionmaker()
with session.begin(): with session.begin():
query = (model_query(BarModel, session=session). query = (model_query(BarModel, session=session).
find(some_condition)) find(some_condition))
@ -291,11 +280,9 @@ Efficient use of soft deletes:
import functools import functools
import logging import logging
import os.path
import re import re
import time import time
from oslo.config import cfg
import six import six
from sqlalchemy import exc as sqla_exc from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener from sqlalchemy.interfaces import PoolListener
@ -304,150 +291,12 @@ from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql.expression import literal_column
from nova.openstack.common.db import exception from nova.openstack.common.db import exception
from nova.openstack.common.gettextutils import _ from nova.openstack.common.gettextutils import _LE, _LW, _LI
from nova.openstack.common import timeutils from nova.openstack.common import timeutils
sqlite_db_opts = [
cfg.StrOpt('sqlite_db',
default='nova.sqlite',
help='The file name to use with SQLite'),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help='If True, SQLite uses synchronous mode'),
]
database_opts = [
cfg.StrOpt('connection',
default='sqlite:///' +
os.path.abspath(os.path.join(os.path.dirname(__file__),
'../', '$sqlite_db')),
help='The SQLAlchemy connection string used to connect to the '
'database',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('slave_connection',
default='',
secret=True,
help='The SQLAlchemy connection string used to connect to the '
'slave database'),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='Timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='Maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='Interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
]
CONF = cfg.CONF
CONF.register_opts(sqlite_db_opts)
CONF.register_opts(database_opts, 'database')
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
_ENGINE = None
_MAKER = None
_SLAVE_ENGINE = None
_SLAVE_MAKER = None
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection)
cfg.set_defaults(sqlite_db_opts,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def cleanup():
global _ENGINE, _MAKER
global _SLAVE_ENGINE, _SLAVE_MAKER
if _MAKER:
_MAKER.close_all()
_MAKER = None
if _ENGINE:
_ENGINE.dispose()
_ENGINE = None
if _SLAVE_MAKER:
_SLAVE_MAKER.close_all()
_SLAVE_MAKER = None
if _SLAVE_ENGINE:
_SLAVE_ENGINE.dispose()
_SLAVE_ENGINE = None
class SqliteForeignKeysListener(PoolListener): class SqliteForeignKeysListener(PoolListener):
"""Ensures that the foreign key constraints are enforced in SQLite. """Ensures that the foreign key constraints are enforced in SQLite.
@ -460,30 +309,6 @@ class SqliteForeignKeysListener(PoolListener):
dbapi_con.execute('pragma foreign_keys=ON') dbapi_con.execute('pragma foreign_keys=ON')
def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
slave_session=False, mysql_traditional_mode=False):
"""Return a SQLAlchemy session."""
global _MAKER
global _SLAVE_MAKER
maker = _MAKER
if slave_session:
maker = _SLAVE_MAKER
if maker is None:
engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session,
mysql_traditional_mode=mysql_traditional_mode)
maker = get_maker(engine, autocommit, expire_on_commit)
if slave_session:
_SLAVE_MAKER = maker
else:
_MAKER = maker
session = maker()
return session
# note(boris-42): In current versions of DB backends unique constraint # note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure: # violation messages follow the structure:
# #
@ -507,11 +332,20 @@ def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
# 'c1'") # 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined # N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'") # with -' for key 'name_of_our_constraint'")
#
# ibm_db_sa:
# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
# statement, UPDATE statement, or foreign key update caused by a
# DELETE statement are not valid because the primary key, unique
# constraint or unique index identified by "2" constrains table
# "NOVA.KEY_PAIRS" from having duplicate values for the index
# key.
_DUP_KEY_RE_DB = { _DUP_KEY_RE_DB = {
"sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"), "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")), re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
"postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),), "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
"mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),) "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
"ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
} }
@ -533,7 +367,7 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
return [columns] return [columns]
return columns[len(uniqbase):].split("0")[1:] return columns[len(uniqbase):].split("0")[1:]
if engine_name not in ["mysql", "sqlite", "postgresql"]: if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
return return
# FIXME(johannes): The usage of the .message attribute has been # FIXME(johannes): The usage of the .message attribute has been
@ -548,7 +382,12 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
else: else:
return return
columns = match.group(1) # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
# columns so we have to omit that from the DBDuplicateEntry error.
columns = ''
if engine_name != 'ibm_db_sa':
columns = match.group(1)
if engine_name == "sqlite": if engine_name == "sqlite":
columns = [c.split('.')[-1] for c in columns.strip().split(", ")] columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
@ -589,57 +428,39 @@ def _raise_if_deadlock_error(operational_error, engine_name):
def _wrap_db_error(f): def _wrap_db_error(f):
#TODO(rpodolyaka): in a subsequent commit make this a class decorator to
# ensure it can only applied to Session subclasses instances (as we use
# Session instance bind attribute below)
@functools.wraps(f) @functools.wraps(f)
def _wrap(*args, **kwargs): def _wrap(self, *args, **kwargs):
try: try:
return f(*args, **kwargs) return f(self, *args, **kwargs)
except UnicodeEncodeError: except UnicodeEncodeError:
raise exception.DBInvalidUnicodeParameter() raise exception.DBInvalidUnicodeParameter()
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
except sqla_exc.OperationalError as e: except sqla_exc.OperationalError as e:
_raise_if_deadlock_error(e, get_engine().name) _raise_if_db_connection_lost(e, self.bind)
_raise_if_deadlock_error(e, self.bind.dialect.name)
# NOTE(comstud): A lot of code is checking for OperationalError # NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now. # so let's not wrap it for now.
raise raise
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
except sqla_exc.IntegrityError as e: except sqla_exc.IntegrityError as e:
# note(boris-42): SqlAlchemy doesn't unify errors from different # note(boris-42): SqlAlchemy doesn't unify errors from different
# DBs so we must do this. Also in some tables (for example # DBs so we must do this. Also in some tables (for example
# instance_types) there are more than one unique constraint. This # instance_types) there are more than one unique constraint. This
# means we should get names of columns, which values violate # means we should get names of columns, which values violate
# unique constraint, from error message. # unique constraint, from error message.
_raise_if_duplicate_entry_error(e, get_engine().name) _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
raise exception.DBError(e) raise exception.DBError(e)
except Exception as e: except Exception as e:
LOG.exception(_('DB exception wrapped.')) LOG.exception(_LE('DB exception wrapped.'))
raise exception.DBError(e) raise exception.DBError(e)
return _wrap return _wrap
def get_engine(sqlite_fk=False, slave_engine=False,
mysql_traditional_mode=False):
"""Return a SQLAlchemy engine."""
global _ENGINE
global _SLAVE_ENGINE
engine = _ENGINE
db_uri = CONF.database.connection
if slave_engine:
engine = _SLAVE_ENGINE
db_uri = CONF.database.slave_connection
if engine is None:
engine = create_engine(db_uri, sqlite_fk=sqlite_fk,
mysql_traditional_mode=mysql_traditional_mode)
if slave_engine:
_SLAVE_ENGINE = engine
else:
_ENGINE = engine
return engine
def _synchronous_switch_listener(dbapi_conn, connection_rec): def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode.""" """Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF") dbapi_conn.execute("PRAGMA synchronous = OFF")
@ -681,7 +502,7 @@ def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
cursor.execute(ping_sql) cursor.execute(ping_sql)
except Exception as ex: except Exception as ex:
if engine.dialect.is_disconnect(ex, dbapi_conn, cursor): if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
msg = _('Database server has gone away: %s') % ex msg = _LW('Database server has gone away: %s') % ex
LOG.warning(msg) LOG.warning(msg)
raise sqla_exc.DisconnectionError(msg) raise sqla_exc.DisconnectionError(msg)
else: else:
@ -696,7 +517,44 @@ def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy):
than a declared field just with warning. That is fraught with data than a declared field just with warning. That is fraught with data
corruption. corruption.
""" """
dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;") _set_session_sql_mode(dbapi_con, connection_rec,
connection_proxy, 'TRADITIONAL')
def _set_session_sql_mode(dbapi_con, connection_rec,
connection_proxy, sql_mode=None):
"""Set the sql_mode session variable.
MySQL supports several server modes. The default is None, but sessions
may choose to enable server modes like TRADITIONAL, ANSI,
several STRICT_* modes and others.
Note: passing in '' (empty string) for sql_mode clears
the SQL mode for the session, overriding a potentially set
server default. Passing in None (the default) makes this
a no-op, meaning if a server-side SQL mode is set, it still applies.
"""
cursor = dbapi_con.cursor()
if sql_mode is not None:
cursor.execute("SET SESSION sql_mode = %s", [sql_mode])
# Check against the real effective SQL mode. Even when unset by
# our own config, the server may still be operating in a specific
# SQL mode as set by the server configuration
cursor.execute("SHOW VARIABLES LIKE 'sql_mode'")
row = cursor.fetchone()
if row is None:
LOG.warning(_LW('Unable to detect effective SQL mode'))
return
realmode = row[1]
LOG.info(_LI('MySQL server mode set to %s') % realmode)
# 'TRADITIONAL' mode enables several other modes, so
# we need a substring match here
if not ('TRADITIONAL' in realmode.upper() or
'STRICT_ALL_TABLES' in realmode.upper()):
LOG.warning(_LW("MySQL SQL mode is '%s', "
"consider enabling TRADITIONAL or STRICT_ALL_TABLES")
% realmode)
def _is_db_connection_error(args): def _is_db_connection_error(args):
@ -711,69 +569,79 @@ def _is_db_connection_error(args):
return False return False
def create_engine(sql_connection, sqlite_fk=False, def _raise_if_db_connection_lost(error, engine):
mysql_traditional_mode=False): # NOTE(vsergeyev): Function is_disconnect(e, connection, cursor)
# requires connection and cursor in incoming parameters,
# but we have no possibility to create connection if DB
# is not available, so in such case reconnect fails.
# But is_disconnect() ignores these parameters, so it
# makes sense to pass to function None as placeholder
# instead of connection and cursor.
if engine.dialect.is_disconnect(error, None, None):
raise exception.DBConnectionError(error)
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
mysql_traditional_mode=False, idle_timeout=3600,
connection_debug=0, max_pool_size=None, max_overflow=None,
pool_timeout=None, sqlite_synchronous=True,
connection_trace=False, max_retries=10, retry_interval=10):
"""Return a new SQLAlchemy engine.""" """Return a new SQLAlchemy engine."""
# NOTE(geekinutah): At this point we could be connecting to the normal
# db handle or the slave db handle. Things like
# _wrap_db_error aren't going to work well if their
# backends don't match. Let's check.
_assert_matching_drivers()
connection_dict = sqlalchemy.engine.url.make_url(sql_connection) connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = { engine_args = {
"pool_recycle": CONF.database.idle_timeout, "pool_recycle": idle_timeout,
"echo": False,
'convert_unicode': True, 'convert_unicode': True,
} }
# Map our SQL debug level to SQLAlchemy's options logger = logging.getLogger('sqlalchemy.engine')
if CONF.database.connection_debug >= 100:
engine_args['echo'] = 'debug' # Map SQL debug level to Python log level
elif CONF.database.connection_debug >= 50: if connection_debug >= 100:
engine_args['echo'] = True logger.setLevel(logging.DEBUG)
elif connection_debug >= 50:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)
if "sqlite" in connection_dict.drivername: if "sqlite" in connection_dict.drivername:
if sqlite_fk: if sqlite_fk:
engine_args["listeners"] = [SqliteForeignKeysListener()] engine_args["listeners"] = [SqliteForeignKeysListener()]
engine_args["poolclass"] = NullPool engine_args["poolclass"] = NullPool
if CONF.database.connection == "sqlite://": if sql_connection == "sqlite://":
engine_args["poolclass"] = StaticPool engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False} engine_args["connect_args"] = {'check_same_thread': False}
else: else:
if CONF.database.max_pool_size is not None: if max_pool_size is not None:
engine_args['pool_size'] = CONF.database.max_pool_size engine_args['pool_size'] = max_pool_size
if CONF.database.max_overflow is not None: if max_overflow is not None:
engine_args['max_overflow'] = CONF.database.max_overflow engine_args['max_overflow'] = max_overflow
if CONF.database.pool_timeout is not None: if pool_timeout is not None:
engine_args['pool_timeout'] = CONF.database.pool_timeout engine_args['pool_timeout'] = pool_timeout
engine = sqlalchemy.create_engine(sql_connection, **engine_args) engine = sqlalchemy.create_engine(sql_connection, **engine_args)
sqlalchemy.event.listen(engine, 'checkin', _thread_yield) sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
if engine.name in ['mysql', 'ibm_db_sa']: if engine.name in ['mysql', 'ibm_db_sa']:
callback = functools.partial(_ping_listener, engine) ping_callback = functools.partial(_ping_listener, engine)
sqlalchemy.event.listen(engine, 'checkout', callback) sqlalchemy.event.listen(engine, 'checkout', ping_callback)
if engine.name == 'mysql': if engine.name == 'mysql':
if mysql_traditional_mode: if mysql_traditional_mode:
sqlalchemy.event.listen(engine, 'checkout', mysql_sql_mode = 'TRADITIONAL'
_set_mode_traditional) if mysql_sql_mode:
else: mode_callback = functools.partial(_set_session_sql_mode,
LOG.warning(_("This application has not enabled MySQL " sql_mode=mysql_sql_mode)
"traditional mode, which means silent " sqlalchemy.event.listen(engine, 'checkout', mode_callback)
"data corruption may occur. "
"Please encourage the application "
"developers to enable this mode."))
elif 'sqlite' in connection_dict.drivername: elif 'sqlite' in connection_dict.drivername:
if not CONF.sqlite_synchronous: if not sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect', sqlalchemy.event.listen(engine, 'connect',
_synchronous_switch_listener) _synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener) sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
if (CONF.database.connection_trace and if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
engine.dialect.dbapi.__name__ == 'MySQLdb'):
_patch_mysqldb_with_stacktrace_comments() _patch_mysqldb_with_stacktrace_comments()
try: try:
@ -782,15 +650,15 @@ def create_engine(sql_connection, sqlite_fk=False,
if not _is_db_connection_error(e.args[0]): if not _is_db_connection_error(e.args[0]):
raise raise
remaining = CONF.database.max_retries remaining = max_retries
if remaining == -1: if remaining == -1:
remaining = 'infinite' remaining = 'infinite'
while True: while True:
msg = _('SQL connection failed. %s attempts left.') msg = _LW('SQL connection failed. %s attempts left.')
LOG.warning(msg % remaining) LOG.warning(msg % remaining)
if remaining != 'infinite': if remaining != 'infinite':
remaining -= 1 remaining -= 1
time.sleep(CONF.database.retry_interval) time.sleep(retry_interval)
try: try:
engine.connect() engine.connect()
break break
@ -877,13 +745,116 @@ def _patch_mysqldb_with_stacktrace_comments():
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query) setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
def _assert_matching_drivers(): class EngineFacade(object):
"""Make sure slave handle and normal handle have the same driver.""" """A helper class for removing of global engine instances from nova.db.
# NOTE(geekinutah): There's no use case for writing to one backend and
# reading from another. Who knows what the future holds?
if CONF.database.slave_connection == '':
return
normal = sqlalchemy.engine.url.make_url(CONF.database.connection) As a library, nova.db can't decide where to store/when to create engine
slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection) and sessionmaker instances, so this must be left for a target application.
assert normal.drivername == slave.drivername
On the other hand, in order to simplify the adoption of nova.db changes,
we'll provide a helper class, which creates engine and sessionmaker
on its instantiation and provides get_engine()/get_session() methods
that are compatible with corresponding utility functions that currently
exist in target projects, e.g. in Nova.
engine/sessionmaker instances will still be global (and they are meant to
be global), but they will be stored in the app context, rather that in the
nova.db context.
Note: using of this helper is completely optional and you are encouraged to
integrate engine/sessionmaker instances into your apps any way you like
(e.g. one might want to bind a session to a request context). Two important
things to remember:
1. An Engine instance is effectively a pool of DB connections, so it's
meant to be shared (and it's thread-safe).
2. A Session instance is not meant to be shared and represents a DB
transactional context (i.e. it's not thread-safe). sessionmaker is
a factory of sessions.
"""
def __init__(self, sql_connection,
sqlite_fk=False, mysql_sql_mode=None,
autocommit=True, expire_on_commit=False, **kwargs):
"""Initialize engine and sessionmaker instances.
:param sqlite_fk: enable foreign keys in SQLite
:type sqlite_fk: bool
:param mysql_sql_mode: set SQL mode in MySQL
:type mysql_sql_mode: string
:param autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:param expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
Keyword arguments:
:keyword idle_timeout: timeout before idle sql connections are reaped
(defaults to 3600)
:keyword connection_debug: verbosity of SQL debugging information.
0=None, 100=Everything (defaults to 0)
:keyword max_pool_size: maximum number of SQL connections to keep open
in a pool (defaults to SQLAlchemy settings)
:keyword max_overflow: if set, use this value for max_overflow with
sqlalchemy (defaults to SQLAlchemy settings)
:keyword pool_timeout: if set, use this value for pool_timeout with
sqlalchemy (defaults to SQLAlchemy settings)
:keyword sqlite_synchronous: if True, SQLite uses synchronous mode
(defaults to True)
:keyword connection_trace: add python stack traces to SQL as comment
strings (defaults to False)
:keyword max_retries: maximum db connection retries during startup.
(setting -1 implies an infinite retry count)
(defaults to 10)
:keyword retry_interval: interval between retries of opening a sql
connection (defaults to 10)
"""
super(EngineFacade, self).__init__()
self._engine = create_engine(
sql_connection=sql_connection,
sqlite_fk=sqlite_fk,
mysql_sql_mode=mysql_sql_mode,
idle_timeout=kwargs.get('idle_timeout', 3600),
connection_debug=kwargs.get('connection_debug', 0),
max_pool_size=kwargs.get('max_pool_size'),
max_overflow=kwargs.get('max_overflow'),
pool_timeout=kwargs.get('pool_timeout'),
sqlite_synchronous=kwargs.get('sqlite_synchronous', True),
connection_trace=kwargs.get('connection_trace', False),
max_retries=kwargs.get('max_retries', 10),
retry_interval=kwargs.get('retry_interval', 10))
self._session_maker = get_maker(
engine=self._engine,
autocommit=autocommit,
expire_on_commit=expire_on_commit)
def get_engine(self):
"""Get the engine instance (note, that it's shared)."""
return self._engine
def get_session(self, **kwargs):
"""Get a Session instance.
If passed, keyword arguments values override the ones used when the
sessionmaker instance was created.
:keyword autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:keyword expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
"""
for arg in kwargs:
if arg not in ('autocommit', 'expire_on_commit'):
del kwargs[arg]
return self._session_maker(**kwargs)

View File

@ -18,7 +18,6 @@ import functools
import os import os
import fixtures import fixtures
from oslo.config import cfg
import six import six
from nova.openstack.common.db.sqlalchemy import session from nova.openstack.common.db.sqlalchemy import session
@ -38,18 +37,17 @@ class DbFixture(fixtures.Fixture):
def _get_uri(self): def _get_uri(self):
return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://') return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://')
def __init__(self): def __init__(self, test):
super(DbFixture, self).__init__() super(DbFixture, self).__init__()
self.conf = cfg.CONF
self.conf.import_opt('connection', self.test = test
'nova.openstack.common.db.sqlalchemy.session',
group='database')
def setUp(self): def setUp(self):
super(DbFixture, self).setUp() super(DbFixture, self).setUp()
self.conf.set_default('connection', self._get_uri(), group='database') self.test.engine = session.create_engine(self._get_uri())
self.addCleanup(self.conf.reset) self.test.sessionmaker = session.get_maker(self.test.engine)
self.addCleanup(self.test.engine.dispose)
class DbTestCase(test.BaseTestCase): class DbTestCase(test.BaseTestCase):
@ -64,9 +62,7 @@ class DbTestCase(test.BaseTestCase):
def setUp(self): def setUp(self):
super(DbTestCase, self).setUp() super(DbTestCase, self).setUp()
self.useFixture(self.FIXTURE()) self.useFixture(self.FIXTURE(self))
self.addCleanup(session.cleanup)
ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql'] ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']
@ -83,11 +79,10 @@ def backend_specific(*dialects):
if not set(dialects).issubset(ALLOWED_DIALECTS): if not set(dialects).issubset(ALLOWED_DIALECTS):
raise ValueError( raise ValueError(
"Please use allowed dialects: %s" % ALLOWED_DIALECTS) "Please use allowed dialects: %s" % ALLOWED_DIALECTS)
engine = session.get_engine() if self.engine.name not in dialects:
if engine.name not in dialects:
msg = ('The test "%s" can be run ' msg = ('The test "%s" can be run '
'only on %s. Current engine is %s.') 'only on %s. Current engine is %s.')
args = (f.__name__, ' '.join(dialects), engine.name) args = (f.__name__, ' '.join(dialects), self.engine.name)
self.skip(msg % args) self.skip(msg % args)
else: else:
return f(self) return f(self)

View File

@ -21,12 +21,12 @@ import subprocess
import lockfile import lockfile
from six import moves from six import moves
from six.moves.urllib import parse
import sqlalchemy import sqlalchemy
import sqlalchemy.exc import sqlalchemy.exc
from nova.openstack.common.db.sqlalchemy import utils from nova.openstack.common.db.sqlalchemy import utils
from nova.openstack.common.gettextutils import _ from nova.openstack.common.gettextutils import _LE
from nova.openstack.common.py3kcompat import urlutils
from nova.openstack.common import test from nova.openstack.common import test
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -60,10 +60,10 @@ def _set_db_lock(lock_path=None, lock_prefix=None):
path = lock_path or os.environ.get("NOVA_LOCK_PATH") path = lock_path or os.environ.get("NOVA_LOCK_PATH")
lock = lockfile.FileLock(os.path.join(path, lock_prefix)) lock = lockfile.FileLock(os.path.join(path, lock_prefix))
with lock: with lock:
LOG.debug(_('Got lock "%s"') % f.__name__) LOG.debug('Got lock "%s"' % f.__name__)
return f(*args, **kwargs) return f(*args, **kwargs)
finally: finally:
LOG.debug(_('Lock released "%s"') % f.__name__) LOG.debug('Lock released "%s"' % f.__name__)
return wrapper return wrapper
return decorator return decorator
@ -153,7 +153,7 @@ class BaseMigrationTestCase(test.BaseTestCase):
def _reset_databases(self): def _reset_databases(self):
for key, engine in self.engines.items(): for key, engine in self.engines.items():
conn_string = self.test_databases[key] conn_string = self.test_databases[key]
conn_pieces = urlutils.urlparse(conn_string) conn_pieces = parse.urlparse(conn_string)
engine.dispose() engine.dispose()
if conn_string.startswith('sqlite'): if conn_string.startswith('sqlite'):
# We can just delete the SQLite database, which is # We can just delete the SQLite database, which is
@ -264,6 +264,6 @@ class WalkVersionsMixin(object):
if check: if check:
check(engine, data) check(engine, data)
except Exception: except Exception:
LOG.error("Failed to migrate to version %s on engine %s" % LOG.error(_LE("Failed to migrate to version %s on engine %s") %
(version, engine)) (version, engine))
raise raise

View File

@ -30,6 +30,7 @@ from sqlalchemy import func
from sqlalchemy import Index from sqlalchemy import Index
from sqlalchemy import Integer from sqlalchemy import Integer
from sqlalchemy import MetaData from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select from sqlalchemy.sql import select
@ -37,7 +38,9 @@ from sqlalchemy import String
from sqlalchemy import Table from sqlalchemy import Table
from sqlalchemy.types import NullType from sqlalchemy.types import NullType
from nova.openstack.common.gettextutils import _ from nova.openstack.common import context as request_context
from nova.openstack.common.db.sqlalchemy import models
from nova.openstack.common.gettextutils import _, _LI, _LW
from nova.openstack.common import timeutils from nova.openstack.common import timeutils
@ -93,7 +96,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
if 'id' not in sort_keys: if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check # TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id # the actual primary key, rather than assuming its id
LOG.warning(_('Id not in sort_keys; is sort_keys unique?')) LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs)) assert(not (sort_dir and sort_dirs))
@ -156,6 +159,94 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
return query return query
def _read_deleted_filter(query, db_model, read_deleted):
if 'deleted' not in db_model.__table__.columns:
raise ValueError(_("There is no `deleted` column in `%s` table. "
"Project doesn't use soft-deleted feature.")
% db_model.__name__)
default_deleted_value = db_model.__table__.c.deleted.default.arg
if read_deleted == 'no':
query = query.filter(db_model.deleted == default_deleted_value)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter(db_model.deleted != default_deleted_value)
else:
raise ValueError(_("Unrecognized read_deleted value '%s'")
% read_deleted)
return query
def _project_filter(query, db_model, context, project_only):
if project_only and 'project_id' not in db_model.__table__.columns:
raise ValueError(_("There is no `project_id` column in `%s` table.")
% db_model.__name__)
if request_context.is_user_context(context) and project_only:
if project_only == 'allow_none':
is_none = None
query = query.filter(or_(db_model.project_id == context.project_id,
db_model.project_id == is_none))
else:
query = query.filter(db_model.project_id == context.project_id)
return query
def model_query(context, model, session, args=None, project_only=False,
read_deleted=None):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param model: Model to query. Must be a subclass of ModelBase.
:type model: models.ModelBase
:param session: The session to use.
:type session: sqlalchemy.orm.session.Session
:param args: Arguments to query. If None - model is used.
:type args: tuple
:param project_only: If present and context is user-type, then restrict
query to match the context's project_id. If set to
'allow_none', restriction includes project_id = None.
:type project_only: bool
:param read_deleted: If present, overrides context's read_deleted field.
:type read_deleted: bool
Usage:
result = (utils.model_query(context, models.Instance, session=session)
.filter_by(uuid=instance_uuid)
.all())
query = utils.model_query(
context, Node,
session=session,
args=(func.count(Node.id), func.sum(Node.ram))
).filter_by(project_id=project_id)
"""
if not read_deleted:
if hasattr(context, 'read_deleted'):
# NOTE(viktors): some projects use `read_deleted` attribute in
# their contexts instead of `show_deleted`.
read_deleted = context.read_deleted
else:
read_deleted = context.show_deleted
if not issubclass(model, models.ModelBase):
raise TypeError(_("model should be a subclass of ModelBase"))
query = session.query(model) if not args else session.query(*args)
query = _read_deleted_filter(query, model, read_deleted)
query = _project_filter(query, model, context, project_only)
return query
def get_table(engine, name): def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db. """Returns an sqlalchemy table dynamically from db.
@ -276,8 +367,8 @@ def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
rows_to_delete_select = select([table.c.id]).where(delete_condition) rows_to_delete_select = select([table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall(): for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_("Deleting duplicated row with id: %(id)s from table: " LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name)) "%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete: if use_soft_delete:
delete_statement = table.update().\ delete_statement = table.update().\

View File

@ -23,6 +23,7 @@ Usual usage in an openstack.common module:
""" """
import copy import copy
import functools
import gettext import gettext
import locale import locale
from logging import handlers from logging import handlers
@ -35,6 +36,17 @@ import six
_localedir = os.environ.get('nova'.upper() + '_LOCALEDIR') _localedir = os.environ.get('nova'.upper() + '_LOCALEDIR')
_t = gettext.translation('nova', localedir=_localedir, fallback=True) _t = gettext.translation('nova', localedir=_localedir, fallback=True)
# We use separate translation catalogs for each log level, so set up a
# mapping between the log level name and the translator. The domain
# for the log level is project_name + "-log-" + log_level so messages
# for each level end up in their own catalog.
_t_log_levels = dict(
(level, gettext.translation('nova' + '-log-' + level,
localedir=_localedir,
fallback=True))
for level in ['info', 'warning', 'error', 'critical']
)
_AVAILABLE_LANGUAGES = {} _AVAILABLE_LANGUAGES = {}
USE_LAZY = False USE_LAZY = False
@ -60,6 +72,28 @@ def _(msg):
return _t.ugettext(msg) return _t.ugettext(msg)
def _log_translation(msg, level):
"""Build a single translation of a log message
"""
if USE_LAZY:
return Message(msg, domain='nova' + '-log-' + level)
else:
translator = _t_log_levels[level]
if six.PY3:
return translator.gettext(msg)
return translator.ugettext(msg)
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = functools.partial(_log_translation, level='info')
_LW = functools.partial(_log_translation, level='warning')
_LE = functools.partial(_log_translation, level='error')
_LC = functools.partial(_log_translation, level='critical')
def install(domain, lazy=False): def install(domain, lazy=False):
"""Install a _() function using the given translation domain. """Install a _() function using the given translation domain.

View File

@ -39,9 +39,9 @@ import testtools
from nova import context from nova import context
from nova import db from nova import db
from nova.db import migration from nova.db import migration
from nova.db.sqlalchemy import api as session
from nova.network import manager as network_manager from nova.network import manager as network_manager
from nova.objects import base as objects_base from nova.objects import base as objects_base
from nova.openstack.common.db.sqlalchemy import session
from nova.openstack.common.fixture import logging as log_fixture from nova.openstack.common.fixture import logging as log_fixture
from nova.openstack.common.fixture import moxstubout from nova.openstack.common.fixture import moxstubout
from nova.openstack.common import log as logging from nova.openstack.common import log as logging
@ -62,9 +62,10 @@ test_opts = [
CONF = cfg.CONF CONF = cfg.CONF
CONF.register_opts(test_opts) CONF.register_opts(test_opts)
CONF.import_opt('connection', CONF.import_opt('connection',
'nova.openstack.common.db.sqlalchemy.session', 'nova.openstack.common.db.options',
group='database')
CONF.import_opt('sqlite_db', 'nova.openstack.common.db.options',
group='database') group='database')
CONF.import_opt('sqlite_db', 'nova.openstack.common.db.sqlalchemy.session')
CONF.import_opt('enabled', 'nova.api.openstack', group='osapi_v3') CONF.import_opt('enabled', 'nova.api.openstack', group='osapi_v3')
CONF.set_override('use_stderr', False) CONF.set_override('use_stderr', False)
@ -254,7 +255,7 @@ class TestCase(testtools.TestCase):
if not _DB_CACHE: if not _DB_CACHE:
_DB_CACHE = Database(session, migration, _DB_CACHE = Database(session, migration,
sql_connection=CONF.database.connection, sql_connection=CONF.database.connection,
sqlite_db=CONF.sqlite_db, sqlite_db=CONF.database.sqlite_db,
sqlite_clean_db=CONF.sqlite_clean_db) sqlite_clean_db=CONF.sqlite_clean_db)
self.useFixture(_DB_CACHE) self.useFixture(_DB_CACHE)

View File

@ -58,7 +58,7 @@ class ConfFixture(config_fixture.Config):
self.conf.set_default('network_size', 8) self.conf.set_default('network_size', 8)
self.conf.set_default('num_networks', 2) self.conf.set_default('num_networks', 2)
self.conf.set_default('connection', "sqlite://", group='database') self.conf.set_default('connection', "sqlite://", group='database')
self.conf.set_default('sqlite_synchronous', False) self.conf.set_default('sqlite_synchronous', False, group='database')
self.conf.set_default('use_ipv6', True) self.conf.set_default('use_ipv6', True)
self.conf.set_default('verbose', True) self.conf.set_default('verbose', True)
self.conf.set_default('vlan_interface', 'eth0') self.conf.set_default('vlan_interface', 'eth0')

View File

@ -45,7 +45,6 @@ from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import utils as db_utils from nova.db.sqlalchemy import utils as db_utils
from nova import exception from nova import exception
from nova.openstack.common.db import exception as db_exc from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import jsonutils from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils from nova.openstack.common import uuidutils
@ -59,8 +58,8 @@ CONF = cfg.CONF
CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker') CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker')
CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker') CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker')
get_engine = db_session.get_engine get_engine = sqlalchemy_api.get_engine
get_session = db_session.get_session get_session = sqlalchemy_api.get_session
def _reservation_get(context, uuid): def _reservation_get(context, uuid):
@ -1205,8 +1204,8 @@ class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
session = get_session() session = get_session()
self.mox.StubOutWithMock(sqlalchemy_api, 'get_session') self.mox.StubOutWithMock(sqlalchemy_api, 'get_session')
sqlalchemy_api.get_session(slave_session=False).AndReturn(session) sqlalchemy_api.get_session(use_slave=False).AndReturn(session)
sqlalchemy_api.get_session(slave_session=False).AndReturn(session) sqlalchemy_api.get_session(use_slave=False).AndReturn(session)
self.mox.ReplayAll() self.mox.ReplayAll()
security_group = db.security_group_get(self.ctxt, sid, security_group = db.security_group_get(self.ctxt, sid,

View File

@ -18,9 +18,9 @@ import time
from nova.compute import flavors from nova.compute import flavors
from nova import context from nova import context
from nova import db from nova import db
from nova.db.sqlalchemy import api as sql_session
from nova.db.sqlalchemy import models from nova.db.sqlalchemy import models
from nova import exception from nova import exception
from nova.openstack.common.db.sqlalchemy import session as sql_session
from nova import test from nova import test

View File

@ -19,13 +19,13 @@
from oslo.config import cfg from oslo.config import cfg
from nova.openstack.common.db.sqlalchemy import session as nova_session from nova.openstack.common.db.sqlalchemy import session as db_session
from nova import paths from nova import paths
opts = [ opts = [
cfg.StrOpt('sql_connection', cfg.StrOpt('sql_connection',
default=('sqlite:///' + default=('sqlite:///' +
paths.state_path_def('baremetal_$sqlite_db')), paths.state_path_def('baremetal_nova.sqlite')),
help='The SQLAlchemy connection string used to connect to the ' help='The SQLAlchemy connection string used to connect to the '
'bare-metal database'), 'bare-metal database'),
] ]
@ -37,27 +37,30 @@ CONF = cfg.CONF
CONF.register_group(baremetal_group) CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group) CONF.register_opts(opts, baremetal_group)
CONF.import_opt('sqlite_db', 'nova.openstack.common.db.sqlalchemy.session')
_ENGINE = None _FACADE = None
_MAKER = None
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade(CONF.baremetal.sql_connection,
**dict(CONF.database.iteritems()))
return _FACADE
def get_session(autocommit=True, expire_on_commit=False): def get_session(autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy session.""" """Return a SQLAlchemy session."""
global _MAKER
if _MAKER is None: facade = _create_facade_lazily()
engine = get_engine() return facade.get_session(autocommit=autocommit,
_MAKER = nova_session.get_maker(engine, autocommit, expire_on_commit) expire_on_commit=expire_on_commit)
session = _MAKER()
return session
def get_engine(): def get_engine():
"""Return a SQLAlchemy engine.""" """Return a SQLAlchemy engine."""
global _ENGINE
if _ENGINE is None: facade = _create_facade_lazily()
_ENGINE = nova_session.create_engine(CONF.baremetal.sql_connection) return facade.get_engine()
return _ENGINE