Merge "Sync the latest DB code from oslo-incubator"

This commit is contained in:
Jenkins 2014-06-08 15:10:55 +00:00 committed by Gerrit Code Review
commit 53e7d93ae4
20 changed files with 1257 additions and 518 deletions

View File

@ -86,17 +86,6 @@
#db_driver=sahara.db #db_driver=sahara.db
#
# Options defined in sahara.openstack.common.db.sqlalchemy.session
#
# The file name to use with SQLite (string value)
#sqlite_db=sahara.sqlite
# If True, SQLite uses synchronous mode (boolean value)
#sqlite_synchronous=true
# #
# Options defined in sahara.openstack.common.lockutils # Options defined in sahara.openstack.common.lockutils
# #
@ -316,28 +305,32 @@
[database] [database]
# #
# Options defined in sahara.openstack.common.db.api # Options defined in sahara.openstack.common.db.options
# #
# The file name to use with SQLite (string value)
#sqlite_db=sahara.sqlite
# If True, SQLite uses synchronous mode (boolean value)
#sqlite_synchronous=true
# The backend to use for db (string value) # The backend to use for db (string value)
# Deprecated group/name - [DEFAULT]/db_backend # Deprecated group/name - [DEFAULT]/db_backend
#backend=sqlalchemy #backend=sqlalchemy
#
# Options defined in sahara.openstack.common.db.sqlalchemy.session
#
# The SQLAlchemy connection string used to connect to the # The SQLAlchemy connection string used to connect to the
# database (string value) # database (string value)
# Deprecated group/name - [DEFAULT]/sql_connection # Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection # Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection # Deprecated group/name - [sql]/connection
#connection=sqlite:////sahara/openstack/common/db/$sqlite_db #connection=<None>
# The SQLAlchemy connection string used to connect to the # The SQL mode to be used for MySQL sessions. This option,
# slave database (string value) # including the default, overrides any server-set SQL mode. To
#slave_connection= # use whatever SQL mode is set by the server configuration,
# set this to no value. Example: mysql_sql_mode= (string
# value)
#mysql_sql_mode=TRADITIONAL
# Timeout before idle sql connections are reaped (integer # Timeout before idle sql connections are reaped (integer
# value) # value)
@ -391,4 +384,23 @@
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout # Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
#pool_timeout=<None> #pool_timeout=<None>
# Enable the experimental use of database reconnect on
# connection lost (boolean value)
#use_db_reconnect=false
# seconds between db connection retries (integer value)
#db_retry_interval=1
# Whether to increase interval between db connection retries,
# up to db_max_retry_interval (boolean value)
#db_inc_retry_interval=true
# max seconds between db connection retries, if
# db_inc_retry_interval is enabled (integer value)
#db_max_retry_interval=10
# maximum db connection retries before error is raised.
# (setting -1 implies an infinite retry count) (integer value)
#db_max_retries=20

View File

@ -107,5 +107,8 @@
#plugins=vanilla,hdp #plugins=vanilla,hdp
[database] [database]
#connection=sqlite:////sahara/openstack/common/db/$sqlite_db
# The SQLAlchemy connection string used to connect to the
# database (string value)
#connection=<None>

View File

@ -4,6 +4,7 @@ eventlet>=0.13.0
Flask>=0.10,<1.0 Flask>=0.10,<1.0
iso8601>=0.1.9 iso8601>=0.1.9
jsonschema>=2.0.0,<3.0.0 jsonschema>=2.0.0,<3.0.0
lockfile>=0.8
oslo.config>=1.2.0 oslo.config>=1.2.0
oslo.messaging>=1.3.0 oslo.messaging>=1.3.0
paramiko>=1.13.0 paramiko>=1.13.0

View File

@ -39,11 +39,14 @@ from sahara.openstack.common import log as logging
CONF = cfg.CONF CONF = cfg.CONF
CONF.import_opt('backend', 'sahara.openstack.common.db.options',
group='database')
_BACKEND_MAPPING = { _BACKEND_MAPPING = {
'sqlalchemy': 'sahara.db.sqlalchemy.api', 'sqlalchemy': 'sahara.db.sqlalchemy.api',
} }
IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING) IMPL = db_api.DBAPI(CONF.database.backend, backend_mapping=_BACKEND_MAPPING)
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)

View File

@ -36,4 +36,5 @@ class Base(object):
def is_mysql_avail(): def is_mysql_avail():
return CONF.database.connection.startswith('mysql') connection = CONF.database.connection
return connection and connection.startswith('mysql')

View File

@ -30,11 +30,38 @@ from sahara.openstack.common import log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
get_engine = db_session.get_engine
get_session = db_session.get_session
CONF = cfg.CONF CONF = cfg.CONF
_FACADE = None
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
params = dict(CONF.database.iteritems())
params["sqlite_fk"] = True
_FACADE = db_session.EngineFacade(
CONF.database.connection,
**params
)
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def cleanup():
global _FACADE
_FACADE = None
def get_backend(): def get_backend():
"""The backend is this module itself.""" """The backend is this module itself."""
@ -72,7 +99,7 @@ def count_query(model, context, session=None, project_only=None):
def setup_db(): def setup_db():
try: try:
engine = db_session.get_engine(sqlite_fk=True) engine = get_engine()
m.Cluster.metadata.create_all(engine) m.Cluster.metadata.create_all(engine)
except sa.exc.OperationalError as e: except sa.exc.OperationalError as e:
LOG.exception("Database registration exception: %s", e) LOG.exception("Database registration exception: %s", e)
@ -82,7 +109,7 @@ def setup_db():
def drop_db(): def drop_db():
try: try:
engine = db_session.get_engine(sqlite_fk=True) engine = get_engine()
m.Cluster.metadata.drop_all(engine) m.Cluster.metadata.drop_all(engine)
except Exception as e: except Exception as e:
LOG.exception("Database shutdown exception: %s", e) LOG.exception("Database shutdown exception: %s", e)
@ -400,11 +427,13 @@ def node_group_template_create(context, values):
node_group_template = m.NodeGroupTemplate() node_group_template = m.NodeGroupTemplate()
node_group_template.update(values) node_group_template.update(values)
try: session = get_session()
node_group_template.save() with session.begin():
except db_exc.DBDuplicateEntry as e: try:
raise ex.DBDuplicateEntry("Duplicate entry for NodeGroupTemplate: %s" node_group_template.save(session=session)
% e.columns) except db_exc.DBDuplicateEntry as e:
raise ex.DBDuplicateEntry("Duplicate entry for NodeGroupTemplate: "
"%s" % e.columns)
return node_group_template return node_group_template
@ -442,11 +471,13 @@ def data_source_create(context, values):
data_source = m.DataSource() data_source = m.DataSource()
data_source.update(values) data_source.update(values)
try: session = get_session()
data_source.save() with session.begin():
except db_exc.DBDuplicateEntry as e: try:
raise ex.DBDuplicateEntry("Duplicate entry for DataSource: %s" data_source.save(session=session)
% e.columns) except db_exc.DBDuplicateEntry as e:
raise ex.DBDuplicateEntry("Duplicate entry for DataSource: %s"
% e.columns)
return data_source return data_source
@ -494,7 +525,7 @@ def job_execution_create(context, values):
job_ex = m.JobExecution() job_ex = m.JobExecution()
job_ex.update(values) job_ex.update(values)
try: try:
job_ex.save() job_ex.save(session=session)
except db_exc.DBDuplicateEntry as e: except db_exc.DBDuplicateEntry as e:
raise ex.DBDuplicateEntry("Duplicate entry for JobExecution: %s" raise ex.DBDuplicateEntry("Duplicate entry for JobExecution: %s"
% e.columns) % e.columns)
@ -635,11 +666,13 @@ def job_binary_create(context, values):
job_binary = m.JobBinary() job_binary = m.JobBinary()
job_binary.update(values) job_binary.update(values)
try: session = get_session()
job_binary.save() with session.begin():
except db_exc.DBDuplicateEntry as e: try:
raise ex.DBDuplicateEntry("Duplicate entry for JobBinary: %s" job_binary.save(session=session)
% e.columns) except db_exc.DBDuplicateEntry as e:
raise ex.DBDuplicateEntry("Duplicate entry for JobBinary: %s"
% e.columns)
return job_binary return job_binary
@ -728,11 +761,13 @@ def job_binary_internal_create(context, values):
job_binary_int = m.JobBinaryInternal() job_binary_int = m.JobBinaryInternal()
job_binary_int.update(values) job_binary_int.update(values)
try: session = get_session()
job_binary_int.save() with session.begin():
except db_exc.DBDuplicateEntry as e: try:
raise ex.DBDuplicateEntry("Duplicate entry for JobBinaryInternal: %s" job_binary_int.save(session=session)
% e.columns) except db_exc.DBDuplicateEntry as e:
raise ex.DBDuplicateEntry("Duplicate entry for JobBinaryInternal: "
"%s" % e.columns)
return job_binary_internal_get(context, job_binary_int.id) return job_binary_internal_get(context, job_binary_int.id)

View File

@ -0,0 +1,111 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple class that stores security context information in the web request.
Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""
import itertools
import uuid
def generate_request_id():
return b'req-' + str(uuid.uuid4()).encode('ascii')
class RequestContext(object):
"""Helper class to represent useful information about a request context.
Stores information about the security context under which the user
accesses the system, as well as additional request information.
"""
user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'
def __init__(self, auth_token=None, user=None, tenant=None, domain=None,
user_domain=None, project_domain=None, is_admin=False,
read_only=False, show_deleted=False, request_id=None,
instance_uuid=None):
self.auth_token = auth_token
self.user = user
self.tenant = tenant
self.domain = domain
self.user_domain = user_domain
self.project_domain = project_domain
self.is_admin = is_admin
self.read_only = read_only
self.show_deleted = show_deleted
self.instance_uuid = instance_uuid
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
def to_dict(self):
user_idt = (
self.user_idt_format.format(user=self.user or '-',
tenant=self.tenant or '-',
domain=self.domain or '-',
user_domain=self.user_domain or '-',
p_domain=self.project_domain or '-'))
return {'user': self.user,
'tenant': self.tenant,
'domain': self.domain,
'user_domain': self.user_domain,
'project_domain': self.project_domain,
'is_admin': self.is_admin,
'read_only': self.read_only,
'show_deleted': self.show_deleted,
'auth_token': self.auth_token,
'request_id': self.request_id,
'instance_uuid': self.instance_uuid,
'user_identity': user_idt}
def get_admin_context(show_deleted=False):
context = RequestContext(None,
tenant=None,
is_admin=True,
show_deleted=show_deleted)
return context
def get_context_from_function_and_args(function, args, kwargs):
"""Find an arg of type RequestContext and return it.
This is useful in a couple of decorators where we don't
know much about the function we're wrapping.
"""
for arg in itertools.chain(kwargs.values(), args):
if isinstance(arg, RequestContext):
return arg
return None
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True

View File

@ -15,43 +15,148 @@
"""Multiple DB API backend support. """Multiple DB API backend support.
Supported configuration options:
The following two parameters are in the 'database' group:
`backend`: DB backend name or full module path to DB backend module.
A DB backend module should implement a method named 'get_backend' which A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB takes no arguments. The method can return any object that implements DB
API methods. API methods.
""" """
from oslo.config import cfg import functools
import logging
import threading
import time
from sahara.openstack.common.db import exception
from sahara.openstack.common.gettextutils import _LE
from sahara.openstack.common import importutils from sahara.openstack.common import importutils
db_opts = [ LOG = logging.getLogger(__name__)
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The backend to use for db'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts, 'database') def safe_for_db_retry(f):
"""Enable db-retry for decorated function, if config option enabled."""
f.__dict__['enable_retry'] = True
return f
class wrap_db_retry(object):
"""Retry db.api methods, if DBConnectionError() raised
Retry decorated db.api methods. If we enabled `use_db_reconnect`
in config, this decorator will be applied to all db.api functions,
marked with @safe_for_db_retry decorator.
Decorator catchs DBConnectionError() and retries function in a
loop until it succeeds, or until maximum retries count will be reached.
"""
def __init__(self, retry_interval, max_retries, inc_retry_interval,
max_retry_interval):
super(wrap_db_retry, self).__init__()
self.retry_interval = retry_interval
self.max_retries = max_retries
self.inc_retry_interval = inc_retry_interval
self.max_retry_interval = max_retry_interval
def __call__(self, f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
next_interval = self.retry_interval
remaining = self.max_retries
while True:
try:
return f(*args, **kwargs)
except exception.DBConnectionError as e:
if remaining == 0:
LOG.exception(_LE('DB exceeded retry limit.'))
raise exception.DBError(e)
if remaining != -1:
remaining -= 1
LOG.exception(_LE('DB connection error.'))
# NOTE(vsergeyev): We are using patched time module, so
# this effectively yields the execution
# context to another green thread.
time.sleep(next_interval)
if self.inc_retry_interval:
next_interval = min(
next_interval * 2,
self.max_retry_interval
)
return wrapper
class DBAPI(object): class DBAPI(object):
def __init__(self, backend_mapping=None): def __init__(self, backend_name, backend_mapping=None, lazy=False,
if backend_mapping is None: **kwargs):
backend_mapping = {} """Initialize the chosen DB API backend.
backend_name = CONF.database.backend
# Import the untranslated name if we don't have a :param backend_name: name of the backend to load
# mapping. :type backend_name: str
backend_path = backend_mapping.get(backend_name, backend_name)
backend_mod = importutils.import_module(backend_path) :param backend_mapping: backend name -> module/class to load mapping
self.__backend = backend_mod.get_backend() :type backend_mapping: dict
:param lazy: load the DB backend lazily on the first DB API method call
:type lazy: bool
Keyword arguments:
:keyword use_db_reconnect: retry DB transactions on disconnect or not
:type use_db_reconnect: bool
:keyword retry_interval: seconds between transaction retries
:type retry_interval: int
:keyword inc_retry_interval: increase retry interval or not
:type inc_retry_interval: bool
:keyword max_retry_interval: max interval value between retries
:type max_retry_interval: int
:keyword max_retries: max number of retries before an error is raised
:type max_retries: int
"""
self._backend = None
self._backend_name = backend_name
self._backend_mapping = backend_mapping or {}
self._lock = threading.Lock()
if not lazy:
self._load_backend()
self.use_db_reconnect = kwargs.get('use_db_reconnect', False)
self.retry_interval = kwargs.get('retry_interval', 1)
self.inc_retry_interval = kwargs.get('inc_retry_interval', True)
self.max_retry_interval = kwargs.get('max_retry_interval', 10)
self.max_retries = kwargs.get('max_retries', 20)
def _load_backend(self):
with self._lock:
if not self._backend:
# Import the untranslated name if we don't have a mapping
backend_path = self._backend_mapping.get(self._backend_name,
self._backend_name)
backend_mod = importutils.import_module(backend_path)
self._backend = backend_mod.get_backend()
def __getattr__(self, key): def __getattr__(self, key):
return getattr(self.__backend, key) if not self._backend:
self._load_backend()
attr = getattr(self._backend, key)
if not hasattr(attr, '__call__'):
return attr
# NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
# DB API methods, decorated with @safe_for_db_retry
# on disconnect.
if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
attr = wrap_db_retry(
retry_interval=self.retry_interval,
max_retries=self.max_retries,
inc_retry_interval=self.inc_retry_interval,
max_retry_interval=self.max_retry_interval)(attr)
return attr

View File

@ -16,6 +16,8 @@
"""DB related custom exceptions.""" """DB related custom exceptions."""
import six
from sahara.openstack.common.gettextutils import _ from sahara.openstack.common.gettextutils import _
@ -23,7 +25,7 @@ class DBError(Exception):
"""Wraps an implementation specific exception.""" """Wraps an implementation specific exception."""
def __init__(self, inner_exception=None): def __init__(self, inner_exception=None):
self.inner_exception = inner_exception self.inner_exception = inner_exception
super(DBError, self).__init__(str(inner_exception)) super(DBError, self).__init__(six.text_type(inner_exception))
class DBDuplicateEntry(DBError): class DBDuplicateEntry(DBError):
@ -46,7 +48,7 @@ class DBInvalidUnicodeParameter(Exception):
class DbMigrationError(DBError): class DbMigrationError(DBError):
"""Wraps migration specific exception.""" """Wraps migration specific exception."""
def __init__(self, message=None): def __init__(self, message=None):
super(DbMigrationError, self).__init__(str(message)) super(DbMigrationError, self).__init__(message)
class DBConnectionError(DBError): class DBConnectionError(DBError):

View File

@ -0,0 +1,171 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo.config import cfg
database_opts = [
cfg.StrOpt('sqlite_db',
deprecated_group='DEFAULT',
default='sahara.sqlite',
help='The file name to use with SQLite'),
cfg.BoolOpt('sqlite_synchronous',
deprecated_group='DEFAULT',
default=True,
help='If True, SQLite uses synchronous mode'),
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The backend to use for db'),
cfg.StrOpt('connection',
help='The SQLAlchemy connection string used to connect to the '
'database',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('mysql_sql_mode',
default='TRADITIONAL',
help='The SQL mode to be used for MySQL sessions. '
'This option, including the default, overrides any '
'server-set SQL mode. To use whatever SQL mode '
'is set by the server configuration, '
'set this to no value. Example: mysql_sql_mode='),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='Timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='Maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='Interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
cfg.BoolOpt('use_db_reconnect',
default=False,
help='Enable the experimental use of database reconnect '
'on connection lost'),
cfg.IntOpt('db_retry_interval',
default=1,
help='seconds between db connection retries'),
cfg.BoolOpt('db_inc_retry_interval',
default=True,
help='Whether to increase interval between db connection '
'retries, up to db_max_retry_interval'),
cfg.IntOpt('db_max_retry_interval',
default=10,
help='max seconds between db connection retries, if '
'db_inc_retry_interval is enabled'),
cfg.IntOpt('db_max_retries',
default=20,
help='maximum db connection retries before error is raised. '
'(setting -1 implies an infinite retry count)'),
]
CONF = cfg.CONF
CONF.register_opts(database_opts, 'database')
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def list_opts():
"""Returns a list of oslo.config options available in the library.
The returned list includes all oslo.config options which may be registered
at runtime by the library.
Each element of the list is a tuple. The first element is the name of the
group under which the list of elements in the second element will be
registered. A group name of None corresponds to the [DEFAULT] group in
config files.
The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users by this library.
:returns: a list of (group_name, opts) tuples
"""
return [('database', copy.deepcopy(database_opts))]

View File

@ -51,13 +51,9 @@ import sqlalchemy
from sqlalchemy.schema import UniqueConstraint from sqlalchemy.schema import UniqueConstraint
from sahara.openstack.common.db import exception from sahara.openstack.common.db import exception
from sahara.openstack.common.db.sqlalchemy import session as db_session
from sahara.openstack.common.gettextutils import _ from sahara.openstack.common.gettextutils import _
get_engine = db_session.get_engine
def _get_unique_constraints(self, table): def _get_unique_constraints(self, table):
"""Retrieve information about existing unique constraints of the table """Retrieve information about existing unique constraints of the table
@ -172,17 +168,20 @@ def patch_migrate():
sqlite.SQLiteConstraintGenerator) sqlite.SQLiteConstraintGenerator)
def db_sync(abs_path, version=None, init_version=0): def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True):
"""Upgrade or downgrade a database. """Upgrade or downgrade a database.
Function runs the upgrade() or downgrade() functions in change scripts. Function runs the upgrade() or downgrade() functions in change scripts.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository. :param abs_path: Absolute path to migrate repository.
:param version: Database will upgrade/downgrade until this version. :param version: Database will upgrade/downgrade until this version.
If None - database will update to the latest If None - database will update to the latest
available version. available version.
:param init_version: Initial database version :param init_version: Initial database version
:param sanity_check: Require schema sanity checking for all tables
""" """
if version is not None: if version is not None:
try: try:
version = int(version) version = int(version)
@ -190,49 +189,62 @@ def db_sync(abs_path, version=None, init_version=0):
raise exception.DbMigrationError( raise exception.DbMigrationError(
message=_("version should be an integer")) message=_("version should be an integer"))
current_version = db_version(abs_path, init_version) current_version = db_version(engine, abs_path, init_version)
repository = _find_migrate_repo(abs_path) repository = _find_migrate_repo(abs_path)
_db_schema_sanity_check() if sanity_check:
_db_schema_sanity_check(engine)
if version is None or version > current_version: if version is None or version > current_version:
return versioning_api.upgrade(get_engine(), repository, version) return versioning_api.upgrade(engine, repository, version)
else: else:
return versioning_api.downgrade(get_engine(), repository, return versioning_api.downgrade(engine, repository,
version) version)
def _db_schema_sanity_check(): def _db_schema_sanity_check(engine):
engine = get_engine() """Ensure all database tables were created with required parameters.
:param engine: SQLAlchemy engine instance for a given database
"""
if engine.name == 'mysql': if engine.name == 'mysql':
onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION ' onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
'from information_schema.TABLES ' 'from information_schema.TABLES '
'where TABLE_SCHEMA=%s and ' 'where TABLE_SCHEMA=%s and '
'TABLE_COLLATION NOT LIKE "%%utf8%%"') 'TABLE_COLLATION NOT LIKE "%%utf8%%"')
table_names = [res[0] for res in engine.execute(onlyutf8_sql, # NOTE(morganfainberg): exclude the sqlalchemy-migrate and alembic
engine.url.database)] # versioning tables from the tables we need to verify utf8 status on.
# Non-standard table names are not supported.
EXCLUDED_TABLES = ['migrate_version', 'alembic_version']
table_names = [res[0] for res in
engine.execute(onlyutf8_sql, engine.url.database) if
res[0].lower() not in EXCLUDED_TABLES]
if len(table_names) > 0: if len(table_names) > 0:
raise ValueError(_('Tables "%s" have non utf8 collation, ' raise ValueError(_('Tables "%s" have non utf8 collation, '
'please make sure all tables are CHARSET=utf8' 'please make sure all tables are CHARSET=utf8'
) % ','.join(table_names)) ) % ','.join(table_names))
def db_version(abs_path, init_version): def db_version(engine, abs_path, init_version):
"""Show the current version of the repository. """Show the current version of the repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository :param abs_path: Absolute path to migrate repository
:param version: Initial database version :param version: Initial database version
""" """
repository = _find_migrate_repo(abs_path) repository = _find_migrate_repo(abs_path)
try: try:
return versioning_api.db_version(get_engine(), repository) return versioning_api.db_version(engine, repository)
except versioning_exceptions.DatabaseNotControlledError: except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData() meta = sqlalchemy.MetaData()
engine = get_engine()
meta.reflect(bind=engine) meta.reflect(bind=engine)
tables = meta.tables tables = meta.tables
if len(tables) == 0 or 'alembic_version' in tables: if len(tables) == 0 or 'alembic_version' in tables:
db_version_control(abs_path, init_version) db_version_control(engine, abs_path, version=init_version)
return versioning_api.db_version(get_engine(), repository) return versioning_api.db_version(engine, repository)
else: else:
raise exception.DbMigrationError( raise exception.DbMigrationError(
message=_( message=_(
@ -241,17 +253,18 @@ def db_version(abs_path, init_version):
"manually.")) "manually."))
def db_version_control(abs_path, version=None): def db_version_control(engine, abs_path, version=None):
"""Mark a database as under this repository's version control. """Mark a database as under this repository's version control.
Once a database is under version control, schema changes should Once a database is under version control, schema changes should
only be done via change scripts in this repository. only be done via change scripts in this repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository :param abs_path: Absolute path to migrate repository
:param version: Initial database version :param version: Initial database version
""" """
repository = _find_migrate_repo(abs_path) repository = _find_migrate_repo(abs_path)
versioning_api.version_control(get_engine(), repository, version) versioning_api.version_control(engine, repository, version)
return version return version

View File

@ -26,18 +26,16 @@ from sqlalchemy import Column, Integer
from sqlalchemy import DateTime from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper from sqlalchemy.orm import object_mapper
from sahara.openstack.common.db.sqlalchemy import session as sa
from sahara.openstack.common import timeutils from sahara.openstack.common import timeutils
class ModelBase(object): class ModelBase(six.Iterator):
"""Base class for models.""" """Base class for models."""
__table_initialized__ = False __table_initialized__ = False
def save(self, session=None): def save(self, session):
"""Save this object.""" """Save this object."""
if not session:
session = sa.get_session()
# NOTE(boris-42): This part of code should be look like: # NOTE(boris-42): This part of code should be look like:
# session.add(self) # session.add(self)
# session.flush() # session.flush()
@ -72,7 +70,7 @@ class ModelBase(object):
return [] return []
def __iter__(self): def __iter__(self):
columns = dict(object_mapper(self).columns).keys() columns = list(dict(object_mapper(self).columns).keys())
# NOTE(russellb): Allow models to specify other keys that can be looked # NOTE(russellb): Allow models to specify other keys that can be looked
# up, beyond the actual db columns. An example would be the 'name' # up, beyond the actual db columns. An example would be the 'name'
# property for an Instance. # property for an Instance.
@ -80,10 +78,14 @@ class ModelBase(object):
self._i = iter(columns) self._i = iter(columns)
return self return self
def next(self): # In Python 3, __next__() has replaced next().
def __next__(self):
n = six.advance_iterator(self._i) n = six.advance_iterator(self._i)
return n, getattr(self, n) return n, getattr(self, n)
def next(self):
return self.__next__()
def update(self, values): def update(self, values):
"""Make the model object behave like a dict.""" """Make the model object behave like a dict."""
for k, v in six.iteritems(values): for k, v in six.iteritems(values):
@ -110,7 +112,7 @@ class SoftDeleteMixin(object):
deleted_at = Column(DateTime) deleted_at = Column(DateTime)
deleted = Column(Integer, default=0) deleted = Column(Integer, default=0)
def soft_delete(self, session=None): def soft_delete(self, session):
"""Mark this object as deleted.""" """Mark this object as deleted."""
self.deleted = self.id self.deleted = self.id
self.deleted_at = timeutils.utcnow() self.deleted_at = timeutils.utcnow()

View File

@ -16,6 +16,7 @@
"""Provision test environment for specific DB backends""" """Provision test environment for specific DB backends"""
import argparse import argparse
import logging
import os import os
import random import random
import string import string
@ -26,23 +27,12 @@ import sqlalchemy
from sahara.openstack.common.db import exception as exc from sahara.openstack.common.db import exception as exc
SQL_CONNECTION = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://') LOG = logging.getLogger(__name__)
def _gen_credentials(*names): def get_engine(uri):
"""Generate credentials."""
auth_dict = {}
for name in names:
val = ''.join(random.choice(string.ascii_lowercase)
for i in moves.range(10))
auth_dict[name] = val
return auth_dict
def _get_engine(uri=SQL_CONNECTION):
"""Engine creation """Engine creation
By default the uri is SQL_CONNECTION which is admin credentials.
Call the function without arguments to get admin connection. Admin Call the function without arguments to get admin connection. Admin
connection required to create temporary user and database for each connection required to create temporary user and database for each
particular test. Otherwise use existing connection to recreate connection particular test. Otherwise use existing connection to recreate connection
@ -62,50 +52,43 @@ def _execute_sql(engine, sql, driver):
except sqlalchemy.exc.OperationalError: except sqlalchemy.exc.OperationalError:
msg = ('%s does not match database admin ' msg = ('%s does not match database admin '
'credentials or database does not exist.') 'credentials or database does not exist.')
raise exc.DBConnectionError(msg % SQL_CONNECTION) LOG.exception(msg % engine.url)
raise exc.DBConnectionError(msg % engine.url)
def create_database(engine): def create_database(engine):
"""Provide temporary user and database for each particular test.""" """Provide temporary user and database for each particular test."""
driver = engine.name driver = engine.name
auth = _gen_credentials('database', 'user', 'passwd') auth = {
'database': ''.join(random.choice(string.ascii_lowercase)
sqls = { for i in moves.range(10)),
'mysql': [ 'user': engine.url.username,
"drop database if exists %(database)s;", 'passwd': engine.url.password,
"grant all on %(database)s.* to '%(user)s'@'localhost'"
" identified by '%(passwd)s';",
"create database %(database)s;",
],
'postgresql': [
"drop database if exists %(database)s;",
"drop user if exists %(user)s;",
"create user %(user)s with password '%(passwd)s';",
"create database %(database)s owner %(user)s;",
]
} }
sqls = [
"drop database if exists %(database)s;",
"create database %(database)s;"
]
if driver == 'sqlite': if driver == 'sqlite':
return 'sqlite:////tmp/%s' % auth['database'] return 'sqlite:////tmp/%s' % auth['database']
elif driver in ['mysql', 'postgresql']:
try: sql_query = map(lambda x: x % auth, sqls)
sql_rows = sqls[driver] _execute_sql(engine, sql_query, driver)
except KeyError: else:
raise ValueError('Unsupported RDBMS %s' % driver) raise ValueError('Unsupported RDBMS %s' % driver)
sql_query = map(lambda x: x % auth, sql_rows)
_execute_sql(engine, sql_query, driver)
params = auth.copy() params = auth.copy()
params['backend'] = driver params['backend'] = driver
return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params
def drop_database(engine, current_uri): def drop_database(admin_engine, current_uri):
"""Drop temporary database and user after each particular test.""" """Drop temporary database and user after each particular test."""
engine = _get_engine(current_uri)
admin_engine = _get_engine() engine = get_engine(current_uri)
driver = engine.name driver = engine.name
auth = {'database': engine.url.database, 'user': engine.url.username} auth = {'database': engine.url.database, 'user': engine.url.username}
@ -114,26 +97,11 @@ def drop_database(engine, current_uri):
os.remove(auth['database']) os.remove(auth['database'])
except OSError: except OSError:
pass pass
return elif driver in ['mysql', 'postgresql']:
sql = "drop database if exists %(database)s;"
sqls = { _execute_sql(admin_engine, [sql % auth], driver)
'mysql': [ else:
"drop database if exists %(database)s;",
"drop user '%(user)s'@'localhost';",
],
'postgresql': [
"drop database if exists %(database)s;",
"drop user if exists %(user)s;",
]
}
try:
sql_rows = sqls[driver]
except KeyError:
raise ValueError('Unsupported RDBMS %s' % driver) raise ValueError('Unsupported RDBMS %s' % driver)
sql_query = map(lambda x: x % auth, sql_rows)
_execute_sql(admin_engine, sql_query, driver)
def main(): def main():
@ -172,7 +140,9 @@ def main():
args = parser.parse_args() args = parser.parse_args()
engine = _get_engine() connection_string = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION',
'sqlite://')
engine = get_engine(connection_string)
which = args.which which = args.which
if which == "create": if which == "create":

View File

@ -16,33 +16,24 @@
"""Session Handling for SQLAlchemy backend. """Session Handling for SQLAlchemy backend.
Initializing:
* Call set_defaults with the minimal of the following kwargs:
sql_connection, sqlite_db
Example::
session.set_defaults(
sql_connection="sqlite:///var/lib/sahara/sqlite.db",
sqlite_db="/var/lib/sahara/sqlite.db")
Recommended ways to use sessions within this framework: Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with AUTOCOMMIT=1. * Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
model_query() will implicitly use a session when called without one `model_query()` will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted. to be automatically retried if the database connection is interrupted.
Note: Automatic retry will be enabled in a future patch. .. note:: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at functionality should be handled at a logical level. For an example, look at
the code around quotas and reservation_rollback(). the code around quotas and `reservation_rollback()`.
Examples:: Examples:
.. code:: python
def get_foo(context, foo): def get_foo(context, foo):
return (model_query(context, models.Foo). return (model_query(context, models.Foo).
@ -61,28 +52,29 @@ Recommended ways to use sessions within this framework:
return foo_ref return foo_ref
* Within the scope of a single method, keeping all the reads and writes within * Within the scope of a single method, keep all the reads and writes within
the context managed by a single session. In this way, the session's __exit__ the context managed by a single session. In this way, the session's
handler will take care of calling flush() and commit() for you. `__exit__` handler will take care of calling `flush()` and `commit()` for
If using this approach, you should not explicitly call flush() or commit(). you. If using this approach, you should not explicitly call `flush()` or
Any error within the context of the session will cause the session to emit `commit()`. Any error within the context of the session will cause the
a ROLLBACK. Database Errors like IntegrityError will be raised in session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
session's __exit__ handler, and any try/except within the context managed raised in `session`'s `__exit__` handler, and any try/except within the
by session will not be triggered. And catching other non-database errors in context managed by `session` will not be triggered. And catching other
the session will not trigger the ROLLBACK, so exception handlers should non-database errors in the session will not trigger the ROLLBACK, so
always be outside the session, unless the developer wants to do a partial exception handlers should always be outside the session, unless the
commit on purpose. If the connection is dropped before this is possible, developer wants to do a partial commit on purpose. If the connection is
the database will implicitly roll back the transaction. dropped before this is possible, the database will implicitly roll back the
transaction.
Note: statements in the session scope will not be automatically retried. .. note:: Statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you If you create models within the session, they need to be added, but you
do not need to call model.save() do not need to call `model.save()`:
:: .. code:: python
def create_many_foo(context, foos): def create_many_foo(context, foos):
session = get_session() session = sessionmaker()
with session.begin(): with session.begin():
for foo in foos: for foo in foos:
foo_ref = models.Foo() foo_ref = models.Foo()
@ -90,7 +82,7 @@ Recommended ways to use sessions within this framework:
session.add(foo_ref) session.add(foo_ref)
def update_bar(context, foo_id, newbar): def update_bar(context, foo_id, newbar):
session = get_session() session = sessionmaker()
with session.begin(): with session.begin():
foo_ref = (model_query(context, models.Foo, session). foo_ref = (model_query(context, models.Foo, session).
filter_by(id=foo_id). filter_by(id=foo_id).
@ -99,11 +91,16 @@ Recommended ways to use sessions within this framework:
filter_by(id=foo_ref['bar_id']). filter_by(id=foo_ref['bar_id']).
update({'bar': newbar})) update({'bar': newbar}))
Note: update_bar is a trivially simple example of using "with session.begin". .. note:: `update_bar` is a trivially simple example of using
Whereas create_many_foo is a good example of when a transaction is needed, ``with session.begin``. Whereas `create_many_foo` is a good example of
it is always best to use as few queries as possible. The two queries in when a transaction is needed, it is always best to use as few queries as
update_bar can be better expressed using a single query which avoids possible.
the need for an explicit transaction. It can be expressed like so::
The two queries in `update_bar` can be better expressed using a single query
which avoids the need for an explicit transaction. It can be expressed like
so:
.. code:: python
def update_bar(context, foo_id, newbar): def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id). subq = (model_query(context, models.Foo.id).
@ -114,21 +111,25 @@ Recommended ways to use sessions within this framework:
filter_by(id=subq.as_scalar()). filter_by(id=subq.as_scalar()).
update({'bar': newbar})) update({'bar': newbar}))
For reference, this emits approximately the following SQL statement:: For reference, this emits approximately the following SQL statement:
.. code:: sql
UPDATE bar SET bar = ${newbar} UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1); WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
Note: create_duplicate_foo is a trivially simple example of catching an .. note:: `create_duplicate_foo` is a trivially simple example of catching an
exception while using "with session.begin". Here create two duplicate exception while using ``with session.begin``. Here create two duplicate
instances with same primary key, must catch the exception out of context instances with same primary key, must catch the exception out of context
managed by a single session: managed by a single session:
.. code:: python
def create_duplicate_foo(context): def create_duplicate_foo(context):
foo1 = models.Foo() foo1 = models.Foo()
foo2 = models.Foo() foo2 = models.Foo()
foo1.id = foo2.id = 1 foo1.id = foo2.id = 1
session = get_session() session = sessionmaker()
try: try:
with session.begin(): with session.begin():
session.add(foo1) session.add(foo1)
@ -138,7 +139,7 @@ Recommended ways to use sessions within this framework:
* Passing an active session between methods. Sessions should only be passed * Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call session.begin() on an existing SQLAlchemy will throw an error when you call `session.begin()` on an existing
transaction. Public methods should not accept a session parameter and should transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope. not be involved in sessions within the caller's scope.
@ -151,10 +152,10 @@ Recommended ways to use sessions within this framework:
becomes less clear in this situation. When this is needed for code clarity, becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented. it should be clearly documented.
:: .. code:: python
def myfunc(foo): def myfunc(foo):
session = get_session() session = sessionmaker()
with session.begin(): with session.begin():
# do some database things # do some database things
bar = _private_func(foo, session) bar = _private_func(foo, session)
@ -162,7 +163,7 @@ Recommended ways to use sessions within this framework:
def _private_func(foo, session=None): def _private_func(foo, session=None):
if not session: if not session:
session = get_session() session = sessionmaker()
with session.begin(subtransaction=True): with session.begin(subtransaction=True):
# do some other database things # do some other database things
return bar return bar
@ -172,13 +173,13 @@ There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary. * Don't keep a transaction open any longer than necessary.
This means that your "with session.begin()" block should be as short This means that your ``with session.begin()`` block should be as short
as possible, while still containing all the related calls for that as possible, while still containing all the related calls for that
transaction. transaction.
* Avoid "with_lockmode('UPDATE')" when possible. * Avoid ``with_lockmode('UPDATE')`` when possible.
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space. "gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap This can effectively prevent any INSERT into a table by locking the gap
@ -189,15 +190,18 @@ There are some things which it is best to avoid:
number of rows matching a query, and if only one row is returned, number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE. then issue the SELECT FOR UPDATE.
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE. The better long-term solution is to use
``INSERT .. ON DUPLICATE KEY UPDATE``.
However, this can not be done until the "deleted" columns are removed and However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables. proper UNIQUE constraints are added to the tables.
Enabling soft deletes: Enabling soft deletes:
* To use/enable soft-deletes, the SoftDeleteMixin must be added * To use/enable soft-deletes, the `SoftDeleteMixin` must be added
to your model class. For example:: to your model class. For example:
.. code:: python
class NovaBase(models.SoftDeleteMixin, models.ModelBase): class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass pass
@ -205,15 +209,16 @@ Enabling soft deletes:
Efficient use of soft deletes: Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:: * There are two possible ways to mark a record as deleted:
`model.soft_delete()` and `query.soft_delete()`.
model.soft_delete() and query.soft_delete(). The `model.soft_delete()` method works with a single already-fetched entry.
`query.soft_delete()` makes only one db request for all entries that
correspond to the query.
model.soft_delete() method works with single already fetched entry. * In almost all cases you should use `query.soft_delete()`. Some examples:
query.soft_delete() makes only one db request for all entries that correspond
to query.
* In almost all cases you should use query.soft_delete(). Some examples:: .. code:: python
def soft_delete_bar(): def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete() count = model_query(BarModel).find(some_condition).soft_delete()
@ -222,7 +227,7 @@ Efficient use of soft deletes:
def complex_soft_delete_with_synchronization_bar(session=None): def complex_soft_delete_with_synchronization_bar(session=None):
if session is None: if session is None:
session = get_session() session = sessionmaker()
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
count = (model_query(BarModel). count = (model_query(BarModel).
find(some_condition). find(some_condition).
@ -232,24 +237,26 @@ Efficient use of soft deletes:
if count == 0: if count == 0:
raise Exception("0 entries were soft deleted") raise Exception("0 entries were soft deleted")
* There is only one situation where model.soft_delete() is appropriate: when * There is only one situation where `model.soft_delete()` is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same you fetch a single record, work with it, and mark it as deleted in the same
transaction. transaction.
:: .. code:: python
def soft_delete_bar_model(): def soft_delete_bar_model():
session = get_session() session = sessionmaker()
with session.begin(): with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first() bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref # Work with bar_ref
bar_ref.soft_delete(session=session) bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to query and However, if you need to work with all entries that correspond to query and
then soft delete them you should use query.soft_delete() method:: then soft delete them you should use the `query.soft_delete()` method:
.. code:: python
def soft_delete_multi_models(): def soft_delete_multi_models():
session = get_session() session = sessionmaker()
with session.begin(): with session.begin():
query = (model_query(BarModel, session=session). query = (model_query(BarModel, session=session).
find(some_condition)) find(some_condition))
@ -260,22 +267,22 @@ Efficient use of soft deletes:
# session and these entries are not used after this. # session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete, When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using model.soft_delete(), as in the following which issues a single query. Using `model.soft_delete()`, as in the following
example, is very inefficient. example, is very inefficient.
:: .. code:: python
for bar_ref in bar_refs: for bar_ref in bar_refs:
bar_ref.soft_delete(session=session) bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests. # This will produce count(bar_refs) db requests.
""" """
import functools import functools
import os.path import logging
import re import re
import time import time
from oslo.config import cfg
import six import six
from sqlalchemy import exc as sqla_exc from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener from sqlalchemy.interfaces import PoolListener
@ -284,151 +291,12 @@ from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql.expression import literal_column
from sahara.openstack.common.db import exception from sahara.openstack.common.db import exception
from sahara.openstack.common.gettextutils import _ from sahara.openstack.common.gettextutils import _LE, _LW
from sahara.openstack.common import log as logging
from sahara.openstack.common import timeutils from sahara.openstack.common import timeutils
sqlite_db_opts = [
cfg.StrOpt('sqlite_db',
default='sahara.sqlite',
help='The file name to use with SQLite'),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help='If True, SQLite uses synchronous mode'),
]
database_opts = [
cfg.StrOpt('connection',
default='sqlite:///' +
os.path.abspath(os.path.join(os.path.dirname(__file__),
'../', '$sqlite_db')),
help='The SQLAlchemy connection string used to connect to the '
'database',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('slave_connection',
default='',
secret=True,
help='The SQLAlchemy connection string used to connect to the '
'slave database'),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='Timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='Maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='Interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
]
CONF = cfg.CONF
CONF.register_opts(sqlite_db_opts)
CONF.register_opts(database_opts, 'database')
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
_ENGINE = None
_MAKER = None
_SLAVE_ENGINE = None
_SLAVE_MAKER = None
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection)
cfg.set_defaults(sqlite_db_opts,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def cleanup():
global _ENGINE, _MAKER
global _SLAVE_ENGINE, _SLAVE_MAKER
if _MAKER:
_MAKER.close_all()
_MAKER = None
if _ENGINE:
_ENGINE.dispose()
_ENGINE = None
if _SLAVE_MAKER:
_SLAVE_MAKER.close_all()
_SLAVE_MAKER = None
if _SLAVE_ENGINE:
_SLAVE_ENGINE.dispose()
_SLAVE_ENGINE = None
class SqliteForeignKeysListener(PoolListener): class SqliteForeignKeysListener(PoolListener):
"""Ensures that the foreign key constraints are enforced in SQLite. """Ensures that the foreign key constraints are enforced in SQLite.
@ -441,30 +309,6 @@ class SqliteForeignKeysListener(PoolListener):
dbapi_con.execute('pragma foreign_keys=ON') dbapi_con.execute('pragma foreign_keys=ON')
def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
slave_session=False, mysql_traditional_mode=False):
"""Return a SQLAlchemy session."""
global _MAKER
global _SLAVE_MAKER
maker = _MAKER
if slave_session:
maker = _SLAVE_MAKER
if maker is None:
engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session,
mysql_traditional_mode=mysql_traditional_mode)
maker = get_maker(engine, autocommit, expire_on_commit)
if slave_session:
_SLAVE_MAKER = maker
else:
_MAKER = maker
session = maker()
return session
# note(boris-42): In current versions of DB backends unique constraint # note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure: # violation messages follow the structure:
# #
@ -473,9 +317,9 @@ def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
# N columns - (IntegrityError) column c1, c2, ..., N are not unique # N columns - (IntegrityError) column c1, c2, ..., N are not unique
# #
# sqlite since 3.7.16: # sqlite since 3.7.16:
# 1 column - (IntegrityError) UNIQUE constraint failed: k1 # 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
# #
# N columns - (IntegrityError) UNIQUE constraint failed: k1, k2 # N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
# #
# postgres: # postgres:
# 1 column - (IntegrityError) duplicate key value violates unique # 1 column - (IntegrityError) duplicate key value violates unique
@ -488,11 +332,20 @@ def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
# 'c1'") # 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined # N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'") # with -' for key 'name_of_our_constraint'")
#
# ibm_db_sa:
# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
# statement, UPDATE statement, or foreign key update caused by a
# DELETE statement are not valid because the primary key, unique
# constraint or unique index identified by "2" constrains table
# "NOVA.KEY_PAIRS" from having duplicate values for the index
# key.
_DUP_KEY_RE_DB = { _DUP_KEY_RE_DB = {
"sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"), "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")), re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
"postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),), "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
"mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),) "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
"ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
} }
@ -514,7 +367,7 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
return [columns] return [columns]
return columns[len(uniqbase):].split("0")[1:] return columns[len(uniqbase):].split("0")[1:]
if engine_name not in ["mysql", "sqlite", "postgresql"]: if engine_name not in ("ibm_db_sa", "mysql", "sqlite", "postgresql"):
return return
# FIXME(johannes): The usage of the .message attribute has been # FIXME(johannes): The usage of the .message attribute has been
@ -529,10 +382,15 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
else: else:
return return
columns = match.group(1) # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
# columns so we have to omit that from the DBDuplicateEntry error.
columns = ''
if engine_name != 'ibm_db_sa':
columns = match.group(1)
if engine_name == "sqlite": if engine_name == "sqlite":
columns = columns.strip().split(", ") columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
else: else:
columns = get_columns_from_uniq_cons_or_name(columns) columns = get_columns_from_uniq_cons_or_name(columns)
raise exception.DBDuplicateEntry(columns, integrity_error) raise exception.DBDuplicateEntry(columns, integrity_error)
@ -571,56 +429,39 @@ def _raise_if_deadlock_error(operational_error, engine_name):
def _wrap_db_error(f): def _wrap_db_error(f):
@functools.wraps(f) @functools.wraps(f)
def _wrap(*args, **kwargs): def _wrap(self, *args, **kwargs):
try: try:
return f(*args, **kwargs) assert issubclass(
self.__class__, sqlalchemy.orm.session.Session
), ('_wrap_db_error() can only be applied to methods of '
'subclasses of sqlalchemy.orm.session.Session.')
return f(self, *args, **kwargs)
except UnicodeEncodeError: except UnicodeEncodeError:
raise exception.DBInvalidUnicodeParameter() raise exception.DBInvalidUnicodeParameter()
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
except sqla_exc.OperationalError as e: except sqla_exc.OperationalError as e:
_raise_if_deadlock_error(e, get_engine().name) _raise_if_db_connection_lost(e, self.bind)
_raise_if_deadlock_error(e, self.bind.dialect.name)
# NOTE(comstud): A lot of code is checking for OperationalError # NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now. # so let's not wrap it for now.
raise raise
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
except sqla_exc.IntegrityError as e: except sqla_exc.IntegrityError as e:
# note(boris-42): SqlAlchemy doesn't unify errors from different # note(boris-42): SqlAlchemy doesn't unify errors from different
# DBs so we must do this. Also in some tables (for example # DBs so we must do this. Also in some tables (for example
# instance_types) there are more than one unique constraint. This # instance_types) there are more than one unique constraint. This
# means we should get names of columns, which values violate # means we should get names of columns, which values violate
# unique constraint, from error message. # unique constraint, from error message.
_raise_if_duplicate_entry_error(e, get_engine().name) _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
raise exception.DBError(e) raise exception.DBError(e)
except Exception as e: except Exception as e:
LOG.exception(_('DB exception wrapped.')) LOG.exception(_LE('DB exception wrapped.'))
raise exception.DBError(e) raise exception.DBError(e)
return _wrap return _wrap
def get_engine(sqlite_fk=False, slave_engine=False,
               mysql_traditional_mode=False):
    """Return the lazily-created, module-global SQLAlchemy engine.

    :param sqlite_fk: enable foreign keys in SQLite
    :param slave_engine: return the slave (read replica) engine instead of
                         the primary one
    :param mysql_traditional_mode: enable MySQL TRADITIONAL sql_mode on the
                                   engine's connections
    """
    global _ENGINE
    global _SLAVE_ENGINE

    # Pick the cached engine and the connection URI for the requested role.
    if slave_engine:
        cached, db_uri = _SLAVE_ENGINE, CONF.database.slave_connection
    else:
        cached, db_uri = _ENGINE, CONF.database.connection

    if cached is not None:
        return cached

    # First use: create the engine and cache it for subsequent calls.
    cached = create_engine(db_uri, sqlite_fk=sqlite_fk,
                           mysql_traditional_mode=mysql_traditional_mode)
    if slave_engine:
        _SLAVE_ENGINE = cached
    else:
        _ENGINE = cached
    return cached
def _synchronous_switch_listener(dbapi_conn, connection_rec): def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode.""" """Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF") dbapi_conn.execute("PRAGMA synchronous = OFF")
@ -648,7 +489,7 @@ def _thread_yield(dbapi_con, con_record):
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy): def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
"""Ensures that MySQL and DB2 connections are alive. """Ensures that MySQL, PostgreSQL or DB2 connections are alive.
Borrowed from: Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
@ -662,22 +503,78 @@ def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
cursor.execute(ping_sql) cursor.execute(ping_sql)
except Exception as ex: except Exception as ex:
if engine.dialect.is_disconnect(ex, dbapi_conn, cursor): if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
msg = _('Database server has gone away: %s') % ex msg = _LW('Database server has gone away: %s') % ex
LOG.warning(msg) LOG.warning(msg)
# if the database server has gone away, all connections in the pool
# have become invalid and we can safely close all of them here,
# rather than waste time on checking of every single connection
engine.dispose()
# this will be handled by SQLAlchemy and will force it to create
# a new connection and retry the original action
raise sqla_exc.DisconnectionError(msg) raise sqla_exc.DisconnectionError(msg)
else: else:
raise raise
def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy): def _set_session_sql_mode(dbapi_con, connection_rec, sql_mode=None):
"""Set engine mode to 'traditional'. """Set the sql_mode session variable.
Required to prevent silent truncates at insert or update operations MySQL supports several server modes. The default is None, but sessions
under MySQL. By default MySQL truncates inserted string if it longer may choose to enable server modes like TRADITIONAL, ANSI,
than a declared field just with warning. That is fraught with data several STRICT_* modes and others.
corruption.
Note: passing in '' (empty string) for sql_mode clears
the SQL mode for the session, overriding a potentially set
server default.
""" """
dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;")
cursor = dbapi_con.cursor()
cursor.execute("SET SESSION sql_mode = %s", [sql_mode])
def _mysql_get_effective_sql_mode(engine):
"""Returns the effective SQL mode for connections from the engine pool.
Returns ``None`` if the mode isn't available, otherwise returns the mode.
"""
# Get the real effective SQL mode. Even when unset by
# our own config, the server may still be operating in a specific
# SQL mode as set by the server configuration.
# Also note that the checkout listener will be called on execute to
# set the mode if it's registered.
row = engine.execute("SHOW VARIABLES LIKE 'sql_mode'").fetchone()
if row is None:
return
return row[1]
def _mysql_check_effective_sql_mode(engine):
    """Log what SQL mode the MySQL server is really using.

    Warns when the mode could not be detected, or when neither strict
    mode is active (non-strict modes silently truncate over-long data
    on insert/update).
    """
    effective_mode = _mysql_get_effective_sql_mode(engine)
    if effective_mode is None:
        LOG.warning(_LW('Unable to detect effective SQL mode'))
        return

    LOG.debug('MySQL server mode set to %s', effective_mode)
    # TRADITIONAL is a composite mode that enables several other modes,
    # so a substring match is required rather than an exact comparison.
    upper_mode = effective_mode.upper()
    if ('TRADITIONAL' not in upper_mode and
            'STRICT_ALL_TABLES' not in upper_mode):
        LOG.warning(_LW("MySQL SQL mode is '%s', "
                        "consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
                    effective_mode)
def _mysql_set_mode_callback(engine, sql_mode):
    """Arrange for new MySQL connections to use *sql_mode*.

    When a mode is given, a 'connect' listener sets it on every new
    connection; in all cases the effective server mode is then checked
    and logged.
    """
    if sql_mode is not None:
        set_mode = functools.partial(_set_session_sql_mode,
                                     sql_mode=sql_mode)
        sqlalchemy.event.listen(engine, 'connect', set_mode)
    _mysql_check_effective_sql_mode(engine)
def _is_db_connection_error(args): def _is_db_connection_error(args):
@ -692,66 +589,75 @@ def _is_db_connection_error(args):
return False return False
def create_engine(sql_connection, sqlite_fk=False, def _raise_if_db_connection_lost(error, engine):
mysql_traditional_mode=False): # NOTE(vsergeyev): Function is_disconnect(e, connection, cursor)
# requires connection and cursor in incoming parameters,
# but we have no possibility to create connection if DB
# is not available, so in such case reconnect fails.
# But is_disconnect() ignores these parameters, so it
# makes sense to pass to function None as placeholder
# instead of connection and cursor.
if engine.dialect.is_disconnect(error, None, None):
raise exception.DBConnectionError(error)
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
idle_timeout=3600,
connection_debug=0, max_pool_size=None, max_overflow=None,
pool_timeout=None, sqlite_synchronous=True,
connection_trace=False, max_retries=10, retry_interval=10):
"""Return a new SQLAlchemy engine.""" """Return a new SQLAlchemy engine."""
# NOTE(geekinutah): At this point we could be connecting to the normal
# db handle or the slave db handle. Things like
# _wrap_db_error aren't going to work well if their
# backends don't match. Let's check.
_assert_matching_drivers()
connection_dict = sqlalchemy.engine.url.make_url(sql_connection) connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = { engine_args = {
"pool_recycle": CONF.database.idle_timeout, "pool_recycle": idle_timeout,
"echo": False,
'convert_unicode': True, 'convert_unicode': True,
} }
# Map our SQL debug level to SQLAlchemy's options logger = logging.getLogger('sqlalchemy.engine')
if CONF.database.connection_debug >= 100:
engine_args['echo'] = 'debug' # Map SQL debug level to Python log level
elif CONF.database.connection_debug >= 50: if connection_debug >= 100:
engine_args['echo'] = True logger.setLevel(logging.DEBUG)
elif connection_debug >= 50:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)
if "sqlite" in connection_dict.drivername: if "sqlite" in connection_dict.drivername:
if sqlite_fk: if sqlite_fk:
engine_args["listeners"] = [SqliteForeignKeysListener()] engine_args["listeners"] = [SqliteForeignKeysListener()]
engine_args["poolclass"] = NullPool engine_args["poolclass"] = NullPool
if CONF.database.connection == "sqlite://": if sql_connection == "sqlite://":
engine_args["poolclass"] = StaticPool engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False} engine_args["connect_args"] = {'check_same_thread': False}
else: else:
if CONF.database.max_pool_size is not None: if max_pool_size is not None:
engine_args['pool_size'] = CONF.database.max_pool_size engine_args['pool_size'] = max_pool_size
if CONF.database.max_overflow is not None: if max_overflow is not None:
engine_args['max_overflow'] = CONF.database.max_overflow engine_args['max_overflow'] = max_overflow
if CONF.database.pool_timeout is not None: if pool_timeout is not None:
engine_args['pool_timeout'] = CONF.database.pool_timeout engine_args['pool_timeout'] = pool_timeout
engine = sqlalchemy.create_engine(sql_connection, **engine_args) engine = sqlalchemy.create_engine(sql_connection, **engine_args)
sqlalchemy.event.listen(engine, 'checkin', _thread_yield) sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
if engine.name in ['mysql', 'ibm_db_sa']: if engine.name in ('ibm_db_sa', 'mysql', 'postgresql'):
callback = functools.partial(_ping_listener, engine) ping_callback = functools.partial(_ping_listener, engine)
sqlalchemy.event.listen(engine, 'checkout', callback) sqlalchemy.event.listen(engine, 'checkout', ping_callback)
if mysql_traditional_mode: if engine.name == 'mysql':
sqlalchemy.event.listen(engine, 'checkout', _set_mode_traditional) if mysql_sql_mode:
else: _mysql_set_mode_callback(engine, mysql_sql_mode)
LOG.warning(_("This application has not enabled MySQL traditional"
" mode, which means silent data corruption may"
" occur. Please encourage the application"
" developers to enable this mode."))
elif 'sqlite' in connection_dict.drivername: elif 'sqlite' in connection_dict.drivername:
if not CONF.sqlite_synchronous: if not sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect', sqlalchemy.event.listen(engine, 'connect',
_synchronous_switch_listener) _synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener) sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
if (CONF.database.connection_trace and if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
engine.dialect.dbapi.__name__ == 'MySQLdb'):
_patch_mysqldb_with_stacktrace_comments() _patch_mysqldb_with_stacktrace_comments()
try: try:
@ -760,15 +666,15 @@ def create_engine(sql_connection, sqlite_fk=False,
if not _is_db_connection_error(e.args[0]): if not _is_db_connection_error(e.args[0]):
raise raise
remaining = CONF.database.max_retries remaining = max_retries
if remaining == -1: if remaining == -1:
remaining = 'infinite' remaining = 'infinite'
while True: while True:
msg = _('SQL connection failed. %s attempts left.') msg = _LW('SQL connection failed. %s attempts left.')
LOG.warning(msg % remaining) LOG.warning(msg % remaining)
if remaining != 'infinite': if remaining != 'infinite':
remaining -= 1 remaining -= 1
time.sleep(CONF.database.retry_interval) time.sleep(retry_interval)
try: try:
engine.connect() engine.connect()
break break
@ -855,13 +761,144 @@ def _patch_mysqldb_with_stacktrace_comments():
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query) setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
def _assert_matching_drivers(): class EngineFacade(object):
"""Make sure slave handle and normal handle have the same driver.""" """A helper class for removing of global engine instances from sahara.db.
# NOTE(geekinutah): There's no use case for writing to one backend and
# reading from another. Who knows what the future holds?
if CONF.database.slave_connection == '':
return
normal = sqlalchemy.engine.url.make_url(CONF.database.connection) As a library, sahara.db can't decide where to store/when to create engine
slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection) and sessionmaker instances, so this must be left for a target application.
assert normal.drivername == slave.drivername
On the other hand, in order to simplify the adoption of sahara.db changes,
we'll provide a helper class, which creates engine and sessionmaker
on its instantiation and provides get_engine()/get_session() methods
that are compatible with corresponding utility functions that currently
exist in target projects, e.g. in Nova.
engine/sessionmaker instances will still be global (and they are meant to
be global), but they will be stored in the app context, rather that in the
sahara.db context.
Note: using of this helper is completely optional and you are encouraged to
integrate engine/sessionmaker instances into your apps any way you like
(e.g. one might want to bind a session to a request context). Two important
things to remember:
1. An Engine instance is effectively a pool of DB connections, so it's
meant to be shared (and it's thread-safe).
2. A Session instance is not meant to be shared and represents a DB
transactional context (i.e. it's not thread-safe). sessionmaker is
a factory of sessions.
"""
def __init__(self, sql_connection,
             sqlite_fk=False, autocommit=True,
             expire_on_commit=False, **kwargs):
    """Initialize engine and sessionmaker instances.

    :param sql_connection: the SQLAlchemy connection string
    :param sqlite_fk: enable foreign keys in SQLite
    :type sqlite_fk: bool
    :param autocommit: use autocommit mode for created Session instances
    :type autocommit: bool
    :param expire_on_commit: expire session objects on commit
    :type expire_on_commit: bool

    Keyword arguments:

    :keyword mysql_sql_mode: the SQL mode to be used for MySQL sessions.
                             (defaults to TRADITIONAL)
    :keyword idle_timeout: timeout before idle sql connections are reaped
                           (defaults to 3600)
    :keyword connection_debug: verbosity of SQL debugging information.
                               0=None, 100=Everything (defaults to 0)
    :keyword max_pool_size: maximum number of SQL connections to keep open
                            in a pool (defaults to SQLAlchemy settings)
    :keyword max_overflow: if set, use this value for max_overflow with
                           sqlalchemy (defaults to SQLAlchemy settings)
    :keyword pool_timeout: if set, use this value for pool_timeout with
                           sqlalchemy (defaults to SQLAlchemy settings)
    :keyword sqlite_synchronous: if True, SQLite uses synchronous mode
                                 (defaults to True)
    :keyword connection_trace: add python stack traces to SQL as comment
                               strings (defaults to False)
    :keyword max_retries: maximum db connection retries during startup.
                          (setting -1 implies an infinite retry count)
                          (defaults to 10)
    :keyword retry_interval: interval between retries of opening a sql
                             connection (defaults to 10)
    """
    super(EngineFacade, self).__init__()

    # Engine tuning knobs with their defaults; any value supplied via
    # **kwargs overrides the corresponding default.
    engine_defaults = {
        'mysql_sql_mode': 'TRADITIONAL',
        'idle_timeout': 3600,
        'connection_debug': 0,
        'max_pool_size': None,
        'max_overflow': None,
        'pool_timeout': None,
        'sqlite_synchronous': True,
        'connection_trace': False,
        'max_retries': 10,
        'retry_interval': 10,
    }
    engine_kwargs = dict((name, kwargs.get(name, default))
                         for name, default in engine_defaults.items())

    self._engine = create_engine(sql_connection=sql_connection,
                                 sqlite_fk=sqlite_fk,
                                 **engine_kwargs)
    self._session_maker = get_maker(engine=self._engine,
                                    autocommit=autocommit,
                                    expire_on_commit=expire_on_commit)
def get_engine(self):
    """Get the engine instance (note, that it's shared).

    An Engine is effectively a pool of DB connections; it is thread-safe
    and meant to be shared across the application (see the class
    docstring).

    :returns: the sqlalchemy Engine created when this facade was built
    """
    return self._engine
def get_session(self, **kwargs):
    """Get a new Session instance from this facade's sessionmaker.

    If passed, keyword arguments values override the ones used when the
    sessionmaker instance was created.  Unknown keyword arguments are
    silently discarded.

    :keyword autocommit: use autocommit mode for created Session instances
    :type autocommit: bool
    :keyword expire_on_commit: expire session objects on commit
    :type expire_on_commit: bool
    """
    # Iterate over a snapshot of the keys: deleting from a dict while
    # iterating over it raises RuntimeError on Python 3.
    for arg in list(kwargs):
        if arg not in ('autocommit', 'expire_on_commit'):
            del kwargs[arg]

    return self._session_maker(**kwargs)
@classmethod
def from_config(cls, connection_string, conf,
                sqlite_fk=False, autocommit=True, expire_on_commit=False):
    """Build an EngineFacade from an oslo.config config instance.

    :param connection_string: SQLAlchemy connection string
    :type connection_string: string
    :param conf: oslo.config config instance
    :type conf: oslo.config.cfg.ConfigOpts
    :param sqlite_fk: enable foreign keys in SQLite
    :type sqlite_fk: bool
    :param autocommit: use autocommit mode for created Session instances
    :type autocommit: bool
    :param expire_on_commit: expire session objects on commit
    :type expire_on_commit: bool
    """
    # Forward every option of the [database] config group as an engine
    # keyword argument; __init__ applies its own defaults on top.
    database_opts = dict(conf.database.items())
    return cls(sql_connection=connection_string,
               sqlite_fk=sqlite_fk,
               autocommit=autocommit,
               expire_on_commit=expire_on_commit,
               **database_opts)

View File

@ -0,0 +1,165 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import functools
import os
import fixtures
from oslotest import base as test_base
import six
from sahara.openstack.common.db.sqlalchemy import provision
from sahara.openstack.common.db.sqlalchemy import session
from sahara.openstack.common.db.sqlalchemy import utils
class DbFixture(fixtures.Fixture):
    """Basic database fixture.

    Lets tests run against different backends: SQLite by default, or
    MySQL/PostgreSQL when the OS_TEST_DBAPI_CONNECTION environment
    variable carries database admin credentials for that backend.  On
    setUp the fixture attaches an engine and a sessionmaker to the
    owning test case.
    """

    def __init__(self, test):
        super(DbFixture, self).__init__()
        # The test case this fixture serves; engine/sessionmaker are
        # published onto it in setUp().
        self.test = test

    def _get_uri(self):
        # Environment override wins; fall back to in-memory SQLite.
        return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://')

    def setUp(self):
        super(DbFixture, self).setUp()
        engine = session.create_engine(self._get_uri())
        self.test.engine = engine
        self.test.sessionmaker = session.get_maker(engine)

    def cleanUp(self):
        self.test.engine.dispose()
class DbTestCase(test_base.BaseTestCase):
    """Base class for testing of DB code.

    Using `DbFixture`. Intended to be the main database test case to use all
    the tests on a given backend with user defined uri. Backend specific
    tests should be decorated with `backend_specific` decorator.
    """

    # Fixture class used to provision the engine/sessionmaker; subclasses
    # may override this (see OpportunisticTestCase below).
    FIXTURE = DbFixture

    def setUp(self):
        super(DbTestCase, self).setUp()
        # The fixture publishes self.engine and self.sessionmaker onto
        # this test case.
        self.useFixture(self.FIXTURE(self))
# Dialects for which backend-specific tests may be declared.
ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']


def backend_specific(*dialects):
    """Decorator to skip backend specific tests on inappropriate engines.

    ::dialects: list of dialects names under which the test will be launched.
    """
    def wrap(f):
        @functools.wraps(f)
        def ins_wrap(self):
            # Reject typo'd/unknown dialect names outright.
            if not set(dialects).issubset(ALLOWED_DIALECTS):
                raise ValueError(
                    "Please use allowed dialects: %s" % ALLOWED_DIALECTS)
            current = self.engine.name
            if current in dialects:
                return f(self)
            # Wrong backend for this test: skip instead of failing.
            msg = ('The test "%s" can be run '
                   'only on %s. Current engine is %s.')
            args = (f.__name__, ' '.join(dialects), current)
            self.skip(msg % args)
        return ins_wrap
    return wrap
@six.add_metaclass(abc.ABCMeta)
class OpportunisticFixture(DbFixture):
    """Base fixture to use default CI databases.

    The databases exist in OpenStack CI infrastructure. But for the
    correct functioning in local environment the databases must be
    created manually.
    """

    # Subclasses must set DRIVER to a backend name, e.g. 'mysql'.
    DRIVER = abc.abstractproperty(lambda: None)
    # Conventional CI credentials shared by all opportunistic backends.
    DBNAME = PASSWORD = USERNAME = 'openstack_citest'

    def setUp(self):
        # Provision a throwaway database BEFORE calling the base setUp:
        # DbFixture.setUp() connects via _get_uri(), which here returns
        # self._uri, so _uri must already be populated.
        self._provisioning_engine = provision.get_engine(
            utils.get_connect_string(backend=self.DRIVER,
                                     user=self.USERNAME,
                                     passwd=self.PASSWORD,
                                     database=self.DBNAME)
        )
        self._uri = provision.create_database(self._provisioning_engine)

        super(OpportunisticFixture, self).setUp()

    def cleanUp(self):
        # Dispose the test engine first (base class), then drop the
        # provisioned database.
        super(OpportunisticFixture, self).cleanUp()

        provision.drop_database(self._provisioning_engine, self._uri)

    def _get_uri(self):
        # Point the base fixture at the database provisioned in setUp().
        return self._uri
@six.add_metaclass(abc.ABCMeta)
class OpportunisticTestCase(DbTestCase):
    """Base test case to use default CI databases.

    Subclasses run only when the openstack_citest database for their
    backend is available; otherwise all their tests are skipped.
    """

    # Concrete subclasses must point FIXTURE at an OpportunisticFixture.
    FIXTURE = abc.abstractproperty(lambda: None)

    def setUp(self):
        fixture = self.FIXTURE
        credentials = {
            'backend': fixture.DRIVER,
            'user': fixture.USERNAME,
            'passwd': fixture.PASSWORD,
            'database': fixture.DBNAME}

        # Skip (rather than error out) when the backend is unreachable.
        if fixture.DRIVER and not utils.is_backend_avail(**credentials):
            msg = '%s backend is not available.' % fixture.DRIVER
            return self.skip(msg)
        super(OpportunisticTestCase, self).setUp()
class MySQLOpportunisticFixture(OpportunisticFixture):
    # Provisions throwaway databases on the CI MySQL server.
    DRIVER = 'mysql'
class PostgreSQLOpportunisticFixture(OpportunisticFixture):
    # Provisions throwaway databases on the CI PostgreSQL server.
    DRIVER = 'postgresql'
class MySQLOpportunisticTestCase(OpportunisticTestCase):
    # Runs only when the CI MySQL backend is reachable; skipped otherwise.
    FIXTURE = MySQLOpportunisticFixture
class PostgreSQLOpportunisticTestCase(OpportunisticTestCase):
    # Runs only when the CI PostgreSQL backend is reachable; skipped
    # otherwise.
    FIXTURE = PostgreSQLOpportunisticFixture

View File

@ -15,19 +15,19 @@
# under the License. # under the License.
import functools import functools
import logging
import os import os
import subprocess import subprocess
import lockfile import lockfile
from oslotest import base as test_base
from six import moves from six import moves
from six.moves.urllib import parse
import sqlalchemy import sqlalchemy
import sqlalchemy.exc import sqlalchemy.exc
from sahara.openstack.common.db.sqlalchemy import utils from sahara.openstack.common.db.sqlalchemy import utils
from sahara.openstack.common.gettextutils import _ from sahara.openstack.common.gettextutils import _LE
from sahara.openstack.common import log as logging
from sahara.openstack.common.py3kcompat import urlutils
from sahara.openstack.common import test
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -60,15 +60,15 @@ def _set_db_lock(lock_path=None, lock_prefix=None):
path = lock_path or os.environ.get("SAHARA_LOCK_PATH") path = lock_path or os.environ.get("SAHARA_LOCK_PATH")
lock = lockfile.FileLock(os.path.join(path, lock_prefix)) lock = lockfile.FileLock(os.path.join(path, lock_prefix))
with lock: with lock:
LOG.debug(_('Got lock "%s"') % f.__name__) LOG.debug('Got lock "%s"' % f.__name__)
return f(*args, **kwargs) return f(*args, **kwargs)
finally: finally:
LOG.debug(_('Lock released "%s"') % f.__name__) LOG.debug('Lock released "%s"' % f.__name__)
return wrapper return wrapper
return decorator return decorator
class BaseMigrationTestCase(test.BaseTestCase): class BaseMigrationTestCase(test_base.BaseTestCase):
"""Base class fort testing of migration utils.""" """Base class fort testing of migration utils."""
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
@ -153,7 +153,7 @@ class BaseMigrationTestCase(test.BaseTestCase):
def _reset_databases(self): def _reset_databases(self):
for key, engine in self.engines.items(): for key, engine in self.engines.items():
conn_string = self.test_databases[key] conn_string = self.test_databases[key]
conn_pieces = urlutils.urlparse(conn_string) conn_pieces = parse.urlparse(conn_string)
engine.dispose() engine.dispose()
if conn_string.startswith('sqlite'): if conn_string.startswith('sqlite'):
# We can just delete the SQLite database, which is # We can just delete the SQLite database, which is
@ -264,6 +264,6 @@ class WalkVersionsMixin(object):
if check: if check:
check(engine, data) check(engine, data)
except Exception: except Exception:
LOG.error("Failed to migrate to version %s on engine %s" % LOG.error(_LE("Failed to migrate to version %s on engine %s") %
(version, engine)) (version, engine))
raise raise

View File

@ -16,9 +16,9 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import logging
import re import re
from migrate.changeset import UniqueConstraint
import sqlalchemy import sqlalchemy
from sqlalchemy import Boolean from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint from sqlalchemy import CheckConstraint
@ -29,16 +29,16 @@ from sqlalchemy import func
from sqlalchemy import Index from sqlalchemy import Index
from sqlalchemy import Integer from sqlalchemy import Integer
from sqlalchemy import MetaData from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String from sqlalchemy import String
from sqlalchemy import Table from sqlalchemy import Table
from sqlalchemy.types import NullType from sqlalchemy.types import NullType
from sahara.openstack.common.gettextutils import _ from sahara.openstack.common import context as request_context
from sahara.openstack.common.db.sqlalchemy import models
from sahara.openstack.common import log as logging from sahara.openstack.common.gettextutils import _, _LI, _LW
from sahara.openstack.common import timeutils from sahara.openstack.common import timeutils
@ -94,7 +94,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
if 'id' not in sort_keys: if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check # TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id # the actual primary key, rather than assuming its id
LOG.warning(_('Id not in sort_keys; is sort_keys unique?')) LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs)) assert(not (sort_dir and sort_dirs))
@ -157,11 +157,111 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
return query return query
def _read_deleted_filter(query, db_model, read_deleted):
if 'deleted' not in db_model.__table__.columns:
raise ValueError(_("There is no `deleted` column in `%s` table. "
"Project doesn't use soft-deleted feature.")
% db_model.__name__)
default_deleted_value = db_model.__table__.c.deleted.default.arg
if read_deleted == 'no':
query = query.filter(db_model.deleted == default_deleted_value)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter(db_model.deleted != default_deleted_value)
else:
raise ValueError(_("Unrecognized read_deleted value '%s'")
% read_deleted)
return query
def _project_filter(query, db_model, context, project_only):
    """Restrict *query* to the context's project when requested.

    :param project_only: if truthy, filter by context.project_id; the
                         special value 'allow_none' additionally admits
                         rows whose project_id is NULL
    :raises ValueError: when the model has no `project_id` column
    """
    if project_only and 'project_id' not in db_model.__table__.columns:
        raise ValueError(_("There is no `project_id` column in `%s` table.")
                         % db_model.__name__)

    # Only user-type contexts are scoped to a project; admin contexts are
    # left unrestricted.
    if request_context.is_user_context(context) and project_only:
        if project_only == 'allow_none':
            # Match rows owned by the project as well as ownerless rows.
            no_owner = None
            query = query.filter(or_(db_model.project_id == context.project_id,
                                     db_model.project_id == no_owner))
        else:
            query = query.filter(db_model.project_id == context.project_id)

    return query
def model_query(context, model, session, args=None, project_only=False,
                read_deleted=None):
    """Build a query for *model*, honoring the context's soft-delete and
    project scoping.

    :param context: context to query under
    :param model: Model to query. Must be a subclass of ModelBase.
    :type model: models.ModelBase
    :param session: The session to use.
    :type session: sqlalchemy.orm.session.Session
    :param args: Arguments to query. If None - model is used.
    :type args: tuple
    :param project_only: If present and context is user-type, then restrict
            query to match the context's project_id. If set to 'allow_none',
            restriction includes project_id = None.
    :type project_only: bool
    :param read_deleted: If present, overrides context's read_deleted field.
    :type read_deleted: bool

    Usage:

    ..code:: python

        result = (utils.model_query(context, models.Instance, session=session)
                       .filter_by(uuid=instance_uuid)
                       .all())

        query = utils.model_query(
            context, Node,
            session=session,
            args=(func.count(Node.id), func.sum(Node.ram))
        ).filter_by(project_id=project_id)

    """
    if not read_deleted:
        # NOTE(viktors): some projects use `read_deleted` attribute in
        # their contexts instead of `show_deleted`.
        if hasattr(context, 'read_deleted'):
            read_deleted = context.read_deleted
        else:
            read_deleted = context.show_deleted

    if not issubclass(model, models.ModelBase):
        raise TypeError(_("model should be a subclass of ModelBase"))

    # Query either the whole model or the explicit column expressions.
    if args:
        query = session.query(*args)
    else:
        query = session.query(model)

    query = _read_deleted_filter(query, model, read_deleted)
    return _project_filter(query, model, context, project_only)
def get_table(engine, name): def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db. """Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations Needed because the models don't work for us in migrations
as models will be far out of sync with the current data. as models will be far out of sync with the current data.
.. warning::
Do not use this method when creating ForeignKeys in database migrations
because sqlalchemy needs the same MetaData object to hold information
about the parent table and the reference table in the ForeignKey. This
method uses a unique MetaData object per table object so it won't work
with ForeignKey creation.
""" """
metadata = MetaData() metadata = MetaData()
metadata.bind = engine metadata.bind = engine
@ -208,6 +308,10 @@ def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
**col_name_col_instance): **col_name_col_instance):
"""Drop unique constraint from table. """Drop unique constraint from table.
DEPRECATED: this function is deprecated and will be removed from sahara.db
in a few releases. Please use UniqueConstraint.drop() method directly for
sqlalchemy-migrate migration scripts.
This method drops UC from table and works for mysql, postgresql and sqlite. This method drops UC from table and works for mysql, postgresql and sqlite.
In mysql and postgresql we are able to use "alter table" construction. In mysql and postgresql we are able to use "alter table" construction.
Sqlalchemy doesn't support some sqlite column types and replaces their Sqlalchemy doesn't support some sqlite column types and replaces their
@ -224,6 +328,8 @@ def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
types by sqlite. For example BigInteger. types by sqlite. For example BigInteger.
""" """
from migrate.changeset import UniqueConstraint
meta = MetaData() meta = MetaData()
meta.bind = migrate_engine meta.bind = migrate_engine
t = Table(table_name, meta, autoload=True) t = Table(table_name, meta, autoload=True)
@ -263,9 +369,9 @@ def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
columns_for_select = [func.max(table.c.id)] columns_for_select = [func.max(table.c.id)]
columns_for_select.extend(columns_for_group_by) columns_for_select.extend(columns_for_group_by)
duplicated_rows_select = select(columns_for_select, duplicated_rows_select = sqlalchemy.sql.select(
group_by=columns_for_group_by, columns_for_select, group_by=columns_for_group_by,
having=func.count(table.c.id) > 1) having=func.count(table.c.id) > 1)
for row in migrate_engine.execute(duplicated_rows_select): for row in migrate_engine.execute(duplicated_rows_select):
# NOTE(boris-42): Do not remove row that has the biggest ID. # NOTE(boris-42): Do not remove row that has the biggest ID.
@ -275,10 +381,11 @@ def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
for name in uc_column_names: for name in uc_column_names:
delete_condition &= table.c[name] == row[name] delete_condition &= table.c[name] == row[name]
rows_to_delete_select = select([table.c.id]).where(delete_condition) rows_to_delete_select = sqlalchemy.sql.select(
[table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall(): for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_("Deleting duplicated row with id: %(id)s from table: " LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name)) "%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete: if use_soft_delete:
delete_statement = table.update().\ delete_statement = table.update().\
@ -386,7 +493,7 @@ def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
else: else:
c_select.append(table.c.deleted == table.c.id) c_select.append(table.c.deleted == table.c.id)
ins = InsertFromSelect(new_table, select(c_select)) ins = InsertFromSelect(new_table, sqlalchemy.sql.select(c_select))
migrate_engine.execute(ins) migrate_engine.execute(ins)
table.drop() table.drop()

View File

@ -22,7 +22,7 @@ import unittest2
from sahara import context from sahara import context
from sahara.db import api as db_api from sahara.db import api as db_api
from sahara import main from sahara import main
from sahara.openstack.common.db.sqlalchemy import session from sahara.openstack.common.db import options
class SaharaTestCase(unittest2.TestCase): class SaharaTestCase(unittest2.TestCase):
@ -55,7 +55,7 @@ class SaharaWithDbTestCase(SaharaTestCase):
def setup_db(self): def setup_db(self):
self.db_fd, self.db_path = tempfile.mkstemp() self.db_fd, self.db_path = tempfile.mkstemp()
session.set_defaults('sqlite:///' + self.db_path, self.db_path) options.set_defaults('sqlite:///' + self.db_path, self.db_path)
db_api.setup_db() db_api.setup_db()
self.addCleanup(self._drop_db) self.addCleanup(self._drop_db)

View File

@ -36,7 +36,7 @@ import sqlalchemy.exc
import unittest2 import unittest2
import sahara.db.migration import sahara.db.migration
from sahara.openstack.common.db.sqlalchemy import session from sahara.db.sqlalchemy import api as sa
from sahara.openstack.common import lockutils from sahara.openstack.common import lockutils
from sahara.openstack.common import log as logging from sahara.openstack.common import log as logging
from sahara.openstack.common import processutils from sahara.openstack.common import processutils
@ -394,7 +394,7 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
database functionality (reset default settings and session cleanup). database functionality (reset default settings and session cleanup).
""" """
CONF.set_override('connection', str(engine.url), group='database') CONF.set_override('connection', str(engine.url), group='database')
session.cleanup() sa.cleanup()
def _test_mysql_opportunistically(self): def _test_mysql_opportunistically(self):
# Test that table creation on mysql only builds InnoDB tables # Test that table creation on mysql only builds InnoDB tables
@ -453,11 +453,11 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
""" """
self.ALEMBIC_CONFIG.stdout = buf = io.StringIO() self.ALEMBIC_CONFIG.stdout = buf = io.StringIO()
CONF.set_override('connection', str(engine.url), group='database') CONF.set_override('connection', str(engine.url), group='database')
session.cleanup() sa.cleanup()
getattr(command, alembic_command)(*args, **kwargs) getattr(command, alembic_command)(*args, **kwargs)
res = buf.getvalue().strip() res = buf.getvalue().strip()
LOG.debug('Alembic command `%s` returns: %s' % (alembic_command, res)) LOG.debug('Alembic command `%s` returns: %s' % (alembic_command, res))
session.cleanup() sa.cleanup()
return res return res
def _get_alembic_versions(self, engine): def _get_alembic_versions(self, engine):

View File

@ -7,6 +7,7 @@ docutils==0.9.1
fixtures>=0.3.14 fixtures>=0.3.14
mock>=1.0 mock>=1.0
oslosphinx oslosphinx
oslotest
psycopg2 psycopg2
pylint==0.25.2 pylint==0.25.2
sphinx>=1.2.1,<1.3 sphinx>=1.2.1,<1.3