Replace oslo-incubator's db with standalone oslo.db

Change-Id: Id78149faefb7e458f7b11de1b2d71a03a8d56091
parent a9ec0c1c4d
commit 6d035709a7
@@ -256,7 +256,7 @@
 #logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s

 # List of logger=LEVEL pairs. (list value)
-#default_log_levels=sqlalchemy=WARN,eventlet.wsgi.server=WARN
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,eventlet.wsgi.server=WARN,stevedore=INFO,mistral.openstack.common.loopingcall=INFO,mistral.openstack.common.periodic_task=INFO,mistral.services.periodic=INFO

 # Enables or disables publication of error events. (boolean
 # value)
@@ -343,26 +343,30 @@
 [database]

 #
-# Options defined in mistral.openstack.common.db.options
+# Options defined in oslo.db
 #

-# The file name to use with SQLite (string value)
-#sqlite_db=mistral.sqlite
+# The file name to use with SQLite. (string value)
+#sqlite_db=oslo.sqlite

-# If True, SQLite uses synchronous mode (boolean value)
+# If True, SQLite uses synchronous mode. (boolean value)
 #sqlite_synchronous=true

-# The backend to use for db (string value)
+# The back end to use for the database. (string value)
 # Deprecated group/name - [DEFAULT]/db_backend
 #backend=sqlalchemy

-# The SQLAlchemy connection string used to connect to the
-# database (string value)
+# The SQLAlchemy connection string to use to connect to the
+# database. (string value)
 # Deprecated group/name - [DEFAULT]/sql_connection
 # Deprecated group/name - [DATABASE]/sql_connection
 # Deprecated group/name - [sql]/connection
 #connection=<None>

+# The SQLAlchemy connection string to use to connect to the
+# slave database. (string value)
+#slave_connection=<None>
+
 # The SQL mode to be used for MySQL sessions. This option,
 # including the default, overrides any server-set SQL mode. To
 # use whatever SQL mode is set by the server configuration,
@@ -370,75 +374,76 @@
 # value)
 #mysql_sql_mode=TRADITIONAL

-# Timeout before idle sql connections are reaped (integer
+# Timeout before idle SQL connections are reaped. (integer
 # value)
 # Deprecated group/name - [DEFAULT]/sql_idle_timeout
 # Deprecated group/name - [DATABASE]/sql_idle_timeout
 # Deprecated group/name - [sql]/idle_timeout
 #idle_timeout=3600

-# Minimum number of SQL connections to keep open in a pool
+# Minimum number of SQL connections to keep open in a pool.
 # (integer value)
 # Deprecated group/name - [DEFAULT]/sql_min_pool_size
 # Deprecated group/name - [DATABASE]/sql_min_pool_size
 #min_pool_size=1

-# Maximum number of SQL connections to keep open in a pool
+# Maximum number of SQL connections to keep open in a pool.
 # (integer value)
 # Deprecated group/name - [DEFAULT]/sql_max_pool_size
 # Deprecated group/name - [DATABASE]/sql_max_pool_size
 #max_pool_size=<None>

-# Maximum db connection retries during startup. (setting -1
-# implies an infinite retry count) (integer value)
+# Maximum db connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
 # Deprecated group/name - [DEFAULT]/sql_max_retries
 # Deprecated group/name - [DATABASE]/sql_max_retries
 #max_retries=10

-# Interval between retries of opening a sql connection
+# Interval between retries of opening a SQL connection.
 # (integer value)
 # Deprecated group/name - [DEFAULT]/sql_retry_interval
 # Deprecated group/name - [DATABASE]/reconnect_interval
 #retry_interval=10

-# If set, use this value for max_overflow with sqlalchemy
+# If set, use this value for max_overflow with SQLAlchemy.
 # (integer value)
 # Deprecated group/name - [DEFAULT]/sql_max_overflow
 # Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
 #max_overflow=<None>

-# Verbosity of SQL debugging information. 0=None,
-# 100=Everything (integer value)
+# Verbosity of SQL debugging information: 0=None,
+# 100=Everything. (integer value)
 # Deprecated group/name - [DEFAULT]/sql_connection_debug
 #connection_debug=0

-# Add python stack traces to SQL as comment strings (boolean
+# Add Python stack traces to SQL as comment strings. (boolean
 # value)
 # Deprecated group/name - [DEFAULT]/sql_connection_trace
 #connection_trace=false

-# If set, use this value for pool_timeout with sqlalchemy
+# If set, use this value for pool_timeout with SQLAlchemy.
 # (integer value)
 # Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
 #pool_timeout=<None>

 # Enable the experimental use of database reconnect on
-# connection lost (boolean value)
+# connection lost. (boolean value)
 #use_db_reconnect=false

-# seconds between db connection retries (integer value)
+# Seconds between database connection retries. (integer value)
 #db_retry_interval=1

-# Whether to increase interval between db connection retries,
-# up to db_max_retry_interval (boolean value)
+# If True, increases the interval between database connection
+# retries up to db_max_retry_interval. (boolean value)
 #db_inc_retry_interval=true

-# max seconds between db connection retries, if
-# db_inc_retry_interval is enabled (integer value)
+# If db_inc_retry_interval is set, the maximum seconds between
+# database connection retries. (integer value)
 #db_max_retry_interval=10

-# maximum db connection retries before error is raised.
-# (setting -1 implies an infinite retry count) (integer value)
+# Maximum database connection retries before error is raised.
+# Set to -1 to specify an infinite retry count. (integer
+# value)
 #db_max_retries=20

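The hunks above are help-text cleanups from oslo.db; option names and defaults are unchanged, plus the new slave_connection option. For orientation, a minimal [database] section exercising these options might look like the following (the connection URL and credentials are placeholders, not values from this commit):

[database]
backend=sqlalchemy
connection=mysql://mistral:PASSWORD@127.0.0.1/mistral
max_retries=10
retry_interval=10
use_db_reconnect=false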
@@ -508,7 +513,7 @@
 #auth_uri=<None>

 # Complete admin Identity API endpoint. This should specify
-# the unversioned root endpoint eg. https://localhost:35357/
+# the unversioned root endpoint e.g. https://localhost:35357/
 # (string value)
 #identity_uri=<None>

@@ -529,9 +534,12 @@
 # with Identity API Server. (integer value)
 #http_request_max_retries=3

-# Single shared secret with the Keystone configuration used
-# for bootstrapping a Keystone installation, or otherwise
-# bypassing the normal authentication process. (string value)
+# This option is deprecated and may be removed in a future
+# release. Single shared secret with the Keystone
+# configuration used for bootstrapping a Keystone
+# installation, or otherwise bypassing the normal
+# authentication process. This option should not be used, use
+# `admin_user` and `admin_password` instead. (string value)
 #admin_token=<None>

 # Keystone account username (string value)
@@ -583,7 +591,7 @@
 # number of revocation events combined with a low cache
 # duration may significantly reduce performance. (integer
 # value)
-#revocation_cache_time=300
+#revocation_cache_time=10

 # (optional) if defined, indicate whether token data should be
 # authenticated or authenticated and encrypted. Acceptable
@@ -616,6 +624,23 @@
 # value)
 #enforce_token_bind=permissive

+# If true, the revocation list will be checked for cached
+# tokens. This requires that PKI tokens are configured on the
+# Keystone server. (boolean value)
+#check_revocations_for_cached=false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a
+# single algorithm or multiple. The algorithms are those
+# supported by Python standard hashlib.new(). The hashes will
+# be tried in the order given, so put the preferred one first
+# for performance. The result of the first hash will be stored
+# in the cache. This will typically be set to multiple values
+# only while migrating from a less secure algorithm to a more
+# secure one. Once all the old tokens are expired this option
+# should be set to a single value for better performance.
+# (list value)
+#hash_algorithms=md5
+

 [matchmaker_ring]

@@ -14,8 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from oslo.db import api as db_api
+
 from mistral import exceptions
-from mistral.openstack.common.db import api as db_api
 from mistral.openstack.common import log as logging

 # Workbooks
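oslo.db's DBAPI keeps the constructor signature of the incubator copy deleted further down, so this facade module only needs its import swapped. A rough sketch of the usual wiring around that import (the backend mapping and delegating function are illustrative, not necessarily Mistral's exact code):

from oslo.db import api as db_api

# Illustrative placeholder; the real backend module path may differ.
_BACKEND_MAPPING = {'sqlalchemy': 'mistral.db.sqlalchemy.api'}

# lazy=True defers importing the backend until the first DB API call.
IMPL = db_api.DBAPI('sqlalchemy', backend_mapping=_BACKEND_MAPPING,
                    lazy=True)


def workbook_get(name):
    # Attribute access on IMPL loads the backend module via its
    # get_backend() and delegates to the same-named function there.
    return IMPL.workbook_get(name)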
@@ -17,22 +17,21 @@
 import sys

 from oslo.config import cfg
+from oslo.db import exception as db_exc
+from oslo.db import options
+from oslo.db.sqlalchemy import session as db_session
 import sqlalchemy as sa

 from mistral import context
 from mistral.db.sqlalchemy import models as m
 from mistral import exceptions as exc
-from mistral.openstack.common.db import exception as db_exc
-from mistral.openstack.common.db.sqlalchemy import session as db_session
 from mistral.openstack.common import log as logging
 from mistral import utils


 LOG = logging.getLogger(__name__)

-cfg.CONF.import_opt('connection',
-                    'mistral.openstack.common.db.options',
-                    group='database')
+options.set_defaults(cfg.CONF, sqlite_db="mistral.sqlite")

 _DB_SESSION_THREAD_LOCAL_NAME = "db_sql_alchemy_session"

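On the session side, oslo.db exposes the same EngineFacade the incubator session module provided, so the db_session import above stays a drop-in replacement. A minimal sketch of how it is typically used (the lazy-facade helper names here are illustrative, not Mistral's exact code):

from oslo.config import cfg
from oslo.db import options
from oslo.db.sqlalchemy import session as db_session

# Register oslo.db's [database] options and pick a project-specific
# SQLite file name, exactly as the hunk above does.
options.set_defaults(cfg.CONF, sqlite_db="mistral.sqlite")

_facade = None


def _get_facade():
    # EngineFacade bundles an engine and a sessionmaker; creating it
    # lazily lets the config files be parsed first.
    global _facade
    if _facade is None:
        _facade = db_session.EngineFacade(cfg.CONF.database.connection)
    return _facade


def get_session():
    return _get_facade().get_session()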
@@ -14,11 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from oslo.db.sqlalchemy import models as oslo_models
 from sqlalchemy.ext import declarative
 from sqlalchemy.orm import attributes

-from mistral.openstack.common.db.sqlalchemy import models as oslo_models
-

 class _MistralBase(oslo_models.ModelBase, oslo_models.TimestampMixin):
     """Base class for all Mistral SQLAlchemy DB Models."""
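Because the mixins keep their names across the move, model definitions themselves are untouched. For illustration, a declarative base built on the class shown in the hunk, plus a toy model (the Workbook columns below are made up, not Mistral's schema):

import sqlalchemy as sa
from sqlalchemy.ext import declarative

from oslo.db.sqlalchemy import models as oslo_models


class _MistralBase(oslo_models.ModelBase, oslo_models.TimestampMixin):
    """Base class for all Mistral SQLAlchemy DB Models."""


# TimestampMixin contributes created_at/updated_at columns to every model.
MistralBase = declarative.declarative_base(cls=_MistralBase)


class Workbook(MistralBase):
    # Toy model for illustration only.
    __tablename__ = 'workbooks'
    name = sa.Column(sa.String(80), primary_key=True)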
@@ -1,162 +0,0 @@
-# Copyright (c) 2013 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Multiple DB API backend support.
-
-A DB backend module should implement a method named 'get_backend' which
-takes no arguments. The method can return any object that implements DB
-API methods.
-"""
-
-import functools
-import logging
-import threading
-import time
-
-from mistral.openstack.common.db import exception
-from mistral.openstack.common.gettextutils import _LE
-from mistral.openstack.common import importutils
-
-
-LOG = logging.getLogger(__name__)
-
-
-def safe_for_db_retry(f):
-    """Enable db-retry for decorated function, if config option enabled."""
-    f.__dict__['enable_retry'] = True
-    return f
-
-
-class wrap_db_retry(object):
-    """Retry db.api methods, if DBConnectionError() raised
-
-    Retry decorated db.api methods. If we enabled `use_db_reconnect`
-    in config, this decorator will be applied to all db.api functions,
-    marked with @safe_for_db_retry decorator.
-    Decorator catchs DBConnectionError() and retries function in a
-    loop until it succeeds, or until maximum retries count will be reached.
-    """
-
-    def __init__(self, retry_interval, max_retries, inc_retry_interval,
-                 max_retry_interval):
-        super(wrap_db_retry, self).__init__()
-
-        self.retry_interval = retry_interval
-        self.max_retries = max_retries
-        self.inc_retry_interval = inc_retry_interval
-        self.max_retry_interval = max_retry_interval
-
-    def __call__(self, f):
-        @functools.wraps(f)
-        def wrapper(*args, **kwargs):
-            next_interval = self.retry_interval
-            remaining = self.max_retries
-
-            while True:
-                try:
-                    return f(*args, **kwargs)
-                except exception.DBConnectionError as e:
-                    if remaining == 0:
-                        LOG.exception(_LE('DB exceeded retry limit.'))
-                        raise exception.DBError(e)
-                    if remaining != -1:
-                        remaining -= 1
-                        LOG.exception(_LE('DB connection error.'))
-                    # NOTE(vsergeyev): We are using patched time module, so
-                    #                  this effectively yields the execution
-                    #                  context to another green thread.
-                    time.sleep(next_interval)
-                    if self.inc_retry_interval:
-                        next_interval = min(
-                            next_interval * 2,
-                            self.max_retry_interval
-                        )
-        return wrapper
-
-
-class DBAPI(object):
-    def __init__(self, backend_name, backend_mapping=None, lazy=False,
-                 **kwargs):
-        """Initialize the chosen DB API backend.
-
-        :param backend_name: name of the backend to load
-        :type backend_name: str
-
-        :param backend_mapping: backend name -> module/class to load mapping
-        :type backend_mapping: dict
-
-        :param lazy: load the DB backend lazily on the first DB API method call
-        :type lazy: bool
-
-        Keyword arguments:
-
-        :keyword use_db_reconnect: retry DB transactions on disconnect or not
-        :type use_db_reconnect: bool
-
-        :keyword retry_interval: seconds between transaction retries
-        :type retry_interval: int
-
-        :keyword inc_retry_interval: increase retry interval or not
-        :type inc_retry_interval: bool
-
-        :keyword max_retry_interval: max interval value between retries
-        :type max_retry_interval: int
-
-        :keyword max_retries: max number of retries before an error is raised
-        :type max_retries: int
-
-        """
-
-        self._backend = None
-        self._backend_name = backend_name
-        self._backend_mapping = backend_mapping or {}
-        self._lock = threading.Lock()
-
-        if not lazy:
-            self._load_backend()
-
-        self.use_db_reconnect = kwargs.get('use_db_reconnect', False)
-        self.retry_interval = kwargs.get('retry_interval', 1)
-        self.inc_retry_interval = kwargs.get('inc_retry_interval', True)
-        self.max_retry_interval = kwargs.get('max_retry_interval', 10)
-        self.max_retries = kwargs.get('max_retries', 20)
-
-    def _load_backend(self):
-        with self._lock:
-            if not self._backend:
-                # Import the untranslated name if we don't have a mapping
-                backend_path = self._backend_mapping.get(self._backend_name,
-                                                         self._backend_name)
-                backend_mod = importutils.import_module(backend_path)
-                self._backend = backend_mod.get_backend()
-
-    def __getattr__(self, key):
-        if not self._backend:
-            self._load_backend()
-
-        attr = getattr(self._backend, key)
-        if not hasattr(attr, '__call__'):
-            return attr
-        # NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
-        #                  DB API methods, decorated with @safe_for_db_retry
-        #                  on disconnect.
-        if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
-            attr = wrap_db_retry(
-                retry_interval=self.retry_interval,
-                max_retries=self.max_retries,
-                inc_retry_interval=self.inc_retry_interval,
-                max_retry_interval=self.max_retry_interval)(attr)
-
-        return attr
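The docstring of the deleted module above defines the 'get_backend' contract that the oslo.db replacement keeps. A minimal conforming backend module, for illustration (the DB function body is hypothetical):

import sys


def workbook_create(values):
    # Real persistence code would live here.
    raise NotImplementedError


def get_backend():
    # Called with no arguments by DBAPI._load_backend(); returning the
    # module itself is the conventional implementation.
    return sys.modules[__name__]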
@@ -1,56 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""DB related custom exceptions."""
-
-import six
-
-from mistral.openstack.common.gettextutils import _
-
-
-class DBError(Exception):
-    """Wraps an implementation specific exception."""
-    def __init__(self, inner_exception=None):
-        self.inner_exception = inner_exception
-        super(DBError, self).__init__(six.text_type(inner_exception))
-
-
-class DBDuplicateEntry(DBError):
-    """Wraps an implementation specific exception."""
-    def __init__(self, columns=[], inner_exception=None):
-        self.columns = columns
-        super(DBDuplicateEntry, self).__init__(inner_exception)
-
-
-class DBDeadlock(DBError):
-    def __init__(self, inner_exception=None):
-        super(DBDeadlock, self).__init__(inner_exception)
-
-
-class DBInvalidUnicodeParameter(Exception):
-    message = _("Invalid Parameter: "
-                "Unicode is not supported by the current database.")
-
-
-class DbMigrationError(DBError):
-    """Wraps migration specific exception."""
-    def __init__(self, message=None):
-        super(DbMigrationError, self).__init__(message)
-
-
-class DBConnectionError(DBError):
-    """Wraps connection specific exception."""
-    pass
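oslo.db exposes these exception classes under the same names, so callers only swap the import, as the sqlalchemy/api.py hunk earlier does. An illustrative caller, with a stand-in application-level error (not Mistral's real exception type):

from oslo.db import exception as db_exc


def create_workbook(session, workbook):
    try:
        with session.begin(subtransactions=True):
            session.add(workbook)
    except db_exc.DBDuplicateEntry as e:
        # Translate the backend error into an application-level one;
        # RuntimeError stands in for Mistral's own exception class.
        raise RuntimeError("Duplicate workbook: %s" % e.columns)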
@@ -1,171 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-from oslo.config import cfg
-
-
-database_opts = [
-    cfg.StrOpt('sqlite_db',
-               deprecated_group='DEFAULT',
-               default='mistral.sqlite',
-               help='The file name to use with SQLite'),
-    cfg.BoolOpt('sqlite_synchronous',
-                deprecated_group='DEFAULT',
-                default=True,
-                help='If True, SQLite uses synchronous mode'),
-    cfg.StrOpt('backend',
-               default='sqlalchemy',
-               deprecated_name='db_backend',
-               deprecated_group='DEFAULT',
-               help='The backend to use for db'),
-    cfg.StrOpt('connection',
-               help='The SQLAlchemy connection string used to connect to the '
-                    'database',
-               secret=True,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
-                                                  group='DEFAULT'),
-                                cfg.DeprecatedOpt('sql_connection',
-                                                  group='DATABASE'),
-                                cfg.DeprecatedOpt('connection',
-                                                  group='sql'), ]),
-    cfg.StrOpt('mysql_sql_mode',
-               default='TRADITIONAL',
-               help='The SQL mode to be used for MySQL sessions. '
-                    'This option, including the default, overrides any '
-                    'server-set SQL mode. To use whatever SQL mode '
-                    'is set by the server configuration, '
-                    'set this to no value. Example: mysql_sql_mode='),
-    cfg.IntOpt('idle_timeout',
-               default=3600,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
-                                                  group='DEFAULT'),
-                                cfg.DeprecatedOpt('sql_idle_timeout',
-                                                  group='DATABASE'),
-                                cfg.DeprecatedOpt('idle_timeout',
-                                                  group='sql')],
-               help='Timeout before idle sql connections are reaped'),
-    cfg.IntOpt('min_pool_size',
-               default=1,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
-                                                  group='DEFAULT'),
-                                cfg.DeprecatedOpt('sql_min_pool_size',
-                                                  group='DATABASE')],
-               help='Minimum number of SQL connections to keep open in a '
-                    'pool'),
-    cfg.IntOpt('max_pool_size',
-               default=None,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
-                                                  group='DEFAULT'),
-                                cfg.DeprecatedOpt('sql_max_pool_size',
-                                                  group='DATABASE')],
-               help='Maximum number of SQL connections to keep open in a '
-                    'pool'),
-    cfg.IntOpt('max_retries',
-               default=10,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
-                                                  group='DEFAULT'),
-                                cfg.DeprecatedOpt('sql_max_retries',
-                                                  group='DATABASE')],
-               help='Maximum db connection retries during startup. '
-                    '(setting -1 implies an infinite retry count)'),
-    cfg.IntOpt('retry_interval',
-               default=10,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
-                                                  group='DEFAULT'),
-                                cfg.DeprecatedOpt('reconnect_interval',
-                                                  group='DATABASE')],
-               help='Interval between retries of opening a sql connection'),
-    cfg.IntOpt('max_overflow',
-               default=None,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
-                                                  group='DEFAULT'),
-                                cfg.DeprecatedOpt('sqlalchemy_max_overflow',
-                                                  group='DATABASE')],
-               help='If set, use this value for max_overflow with sqlalchemy'),
-    cfg.IntOpt('connection_debug',
-               default=0,
-               deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
-                                                  group='DEFAULT')],
-               help='Verbosity of SQL debugging information. 0=None, '
-                    '100=Everything'),
-    cfg.BoolOpt('connection_trace',
-                default=False,
-                deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
-                                                   group='DEFAULT')],
-                help='Add python stack traces to SQL as comment strings'),
-    cfg.IntOpt('pool_timeout',
-               default=None,
-               deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
-                                                  group='DATABASE')],
-               help='If set, use this value for pool_timeout with sqlalchemy'),
-    cfg.BoolOpt('use_db_reconnect',
-                default=False,
-                help='Enable the experimental use of database reconnect '
-                     'on connection lost'),
-    cfg.IntOpt('db_retry_interval',
-               default=1,
-               help='seconds between db connection retries'),
-    cfg.BoolOpt('db_inc_retry_interval',
-                default=True,
-                help='Whether to increase interval between db connection '
-                     'retries, up to db_max_retry_interval'),
-    cfg.IntOpt('db_max_retry_interval',
-               default=10,
-               help='max seconds between db connection retries, if '
-                    'db_inc_retry_interval is enabled'),
-    cfg.IntOpt('db_max_retries',
-               default=20,
-               help='maximum db connection retries before error is raised. '
-                    '(setting -1 implies an infinite retry count)'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(database_opts, 'database')
-
-
-def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
-                 max_overflow=None, pool_timeout=None):
-    """Set defaults for configuration variables."""
-    cfg.set_defaults(database_opts,
-                     connection=sql_connection,
-                     sqlite_db=sqlite_db)
-    # Update the QueuePool defaults
-    if max_pool_size is not None:
-        cfg.set_defaults(database_opts,
-                         max_pool_size=max_pool_size)
-    if max_overflow is not None:
-        cfg.set_defaults(database_opts,
-                         max_overflow=max_overflow)
-    if pool_timeout is not None:
-        cfg.set_defaults(database_opts,
-                         pool_timeout=pool_timeout)
-
-
-def list_opts():
-    """Returns a list of oslo.config options available in the library.
-
-    The returned list includes all oslo.config options which may be registered
-    at runtime by the library.
-
-    Each element of the list is a tuple. The first element is the name of the
-    group under which the list of elements in the second element will be
-    registered. A group name of None corresponds to the [DEFAULT] group in
-    config files.
-
-    The purpose of this is to allow tools like the Oslo sample config file
-    generator to discover the options exposed to users by this library.
-
-    :returns: a list of (group_name, opts) tuples
-    """
-    return [('database', copy.deepcopy(database_opts))]
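oslo.db's replacement for this module's set_defaults() takes the config object as its first argument rather than a connection string; the commit's own call in the sqlalchemy/api.py hunk above is exactly this usage:

from oslo.config import cfg
from oslo.db import options

# After this call, cfg.CONF.database.connection, idle_timeout, etc.
# are registered with the same defaults the deleted module provided.
options.set_defaults(cfg.CONF, sqlite_db="mistral.sqlite")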
@@ -1,278 +0,0 @@
-# coding: utf-8
-#
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# Base on code in migrate/changeset/databases/sqlite.py which is under
-# the following license:
-#
-# The MIT License
-#
-# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-
-import os
-import re
-
-from migrate.changeset import ansisql
-from migrate.changeset.databases import sqlite
-from migrate import exceptions as versioning_exceptions
-from migrate.versioning import api as versioning_api
-from migrate.versioning.repository import Repository
-import sqlalchemy
-from sqlalchemy.schema import UniqueConstraint
-
-from mistral.openstack.common.db import exception
-from mistral.openstack.common.gettextutils import _
-
-
-def _get_unique_constraints(self, table):
-    """Retrieve information about existing unique constraints of the table
-
-    This feature is needed for _recreate_table() to work properly.
-    Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.
-
-    """
-
-    data = table.metadata.bind.execute(
-        """SELECT sql
-           FROM sqlite_master
-           WHERE
-               type='table' AND
-               name=:table_name""",
-        table_name=table.name
-    ).fetchone()[0]
-
-    UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
-    return [
-        UniqueConstraint(
-            *[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
-            name=name
-        )
-        for name, cols in re.findall(UNIQUE_PATTERN, data)
-    ]
-
-
-def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
-    """Recreate the table properly
-
-    Unlike the corresponding original method of sqlalchemy-migrate this one
-    doesn't drop existing unique constraints when creating a new one.
-
-    """
-
-    table_name = self.preparer.format_table(table)
-
-    # we remove all indexes so as not to have
-    # problems during copy and re-create
-    for index in table.indexes:
-        index.drop()
-
-    # reflect existing unique constraints
-    for uc in self._get_unique_constraints(table):
-        table.append_constraint(uc)
-    # omit given unique constraints when creating a new table if required
-    table.constraints = set([
-        cons for cons in table.constraints
-        if omit_uniques is None or cons.name not in omit_uniques
-    ])
-
-    self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
-    self.execute()
-
-    insertion_string = self._modify_table(table, column, delta)
-
-    table.create(bind=self.connection)
-    self.append(insertion_string % {'table_name': table_name})
-    self.execute()
-    self.append('DROP TABLE migration_tmp')
-    self.execute()
-
-
-def _visit_migrate_unique_constraint(self, *p, **k):
-    """Drop the given unique constraint
-
-    The corresponding original method of sqlalchemy-migrate just
-    raises NotImplemented error
-
-    """
-
-    self.recreate_table(p[0].table, omit_uniques=[p[0].name])
-
-
-def patch_migrate():
-    """A workaround for SQLite's inability to alter things
-
-    SQLite abilities to alter tables are very limited (please read
-    http://www.sqlite.org/lang_altertable.html for more details).
-    E. g. one can't drop a column or a constraint in SQLite. The
-    workaround for this is to recreate the original table omitting
-    the corresponding constraint (or column).
-
-    sqlalchemy-migrate library has recreate_table() method that
-    implements this workaround, but it does it wrong:
-
-    - information about unique constraints of a table
-      is not retrieved. So if you have a table with one
-      unique constraint and a migration adding another one
-      you will end up with a table that has only the
-      latter unique constraint, and the former will be lost
-
-    - dropping of unique constraints is not supported at all
-
-    The proper way to fix this is to provide a pull-request to
-    sqlalchemy-migrate, but the project seems to be dead. So we
-    can go on with monkey-patching of the lib at least for now.
-
-    """
-
-    # this patch is needed to ensure that recreate_table() doesn't drop
-    # existing unique constraints of the table when creating a new one
-    helper_cls = sqlite.SQLiteHelper
-    helper_cls.recreate_table = _recreate_table
-    helper_cls._get_unique_constraints = _get_unique_constraints
-
-    # this patch is needed to be able to drop existing unique constraints
-    constraint_cls = sqlite.SQLiteConstraintDropper
-    constraint_cls.visit_migrate_unique_constraint = \
-        _visit_migrate_unique_constraint
-    constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
-                                sqlite.SQLiteConstraintGenerator)
-
-
-def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True):
-    """Upgrade or downgrade a database.
-
-    Function runs the upgrade() or downgrade() functions in change scripts.
-
-    :param engine: SQLAlchemy engine instance for a given database
-    :param abs_path: Absolute path to migrate repository.
-    :param version: Database will upgrade/downgrade until this version.
-                    If None - database will update to the latest
-                    available version.
-    :param init_version: Initial database version
-    :param sanity_check: Require schema sanity checking for all tables
-    """
-
-    if version is not None:
-        try:
-            version = int(version)
-        except ValueError:
-            raise exception.DbMigrationError(
-                message=_("version should be an integer"))
-
-    current_version = db_version(engine, abs_path, init_version)
-    repository = _find_migrate_repo(abs_path)
-    if sanity_check:
-        _db_schema_sanity_check(engine)
-    if version is None or version > current_version:
-        return versioning_api.upgrade(engine, repository, version)
-    else:
-        return versioning_api.downgrade(engine, repository,
-                                        version)
-
-
-def _db_schema_sanity_check(engine):
-    """Ensure all database tables were created with required parameters.
-
-    :param engine: SQLAlchemy engine instance for a given database
-
-    """
-
-    if engine.name == 'mysql':
-        onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
-                        'from information_schema.TABLES '
-                        'where TABLE_SCHEMA=%s and '
-                        'TABLE_COLLATION NOT LIKE "%%utf8%%"')
-
-        # NOTE(morganfainberg): exclude the sqlalchemy-migrate and alembic
-        # versioning tables from the tables we need to verify utf8 status on.
-        # Non-standard table names are not supported.
-        EXCLUDED_TABLES = ['migrate_version', 'alembic_version']
-
-        table_names = [res[0] for res in
-                       engine.execute(onlyutf8_sql, engine.url.database) if
-                       res[0].lower() not in EXCLUDED_TABLES]
-
-        if len(table_names) > 0:
-            raise ValueError(_('Tables "%s" have non utf8 collation, '
-                               'please make sure all tables are CHARSET=utf8'
-                               ) % ','.join(table_names))
-
-
-def db_version(engine, abs_path, init_version):
-    """Show the current version of the repository.
-
-    :param engine: SQLAlchemy engine instance for a given database
-    :param abs_path: Absolute path to migrate repository
-    :param version: Initial database version
-    """
-    repository = _find_migrate_repo(abs_path)
-    try:
-        return versioning_api.db_version(engine, repository)
-    except versioning_exceptions.DatabaseNotControlledError:
-        meta = sqlalchemy.MetaData()
-        meta.reflect(bind=engine)
-        tables = meta.tables
-        if len(tables) == 0 or 'alembic_version' in tables:
-            db_version_control(engine, abs_path, version=init_version)
-            return versioning_api.db_version(engine, repository)
-        else:
-            raise exception.DbMigrationError(
-                message=_(
-                    "The database is not under version control, but has "
-                    "tables. Please stamp the current version of the schema "
-                    "manually."))
-
-
-def db_version_control(engine, abs_path, version=None):
-    """Mark a database as under this repository's version control.
-
-    Once a database is under version control, schema changes should
-    only be done via change scripts in this repository.
-
-    :param engine: SQLAlchemy engine instance for a given database
-    :param abs_path: Absolute path to migrate repository
-    :param version: Initial database version
-    """
-    repository = _find_migrate_repo(abs_path)
-    versioning_api.version_control(engine, repository, version)
-    return version
-
-
-def _find_migrate_repo(abs_path):
-    """Get the project's change script repository
-
-    :param abs_path: Absolute path to migrate repository
-    """
-    if not os.path.exists(abs_path):
-        raise exception.DbMigrationError("Path %s not found" % abs_path)
-    return Repository(abs_path)
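These migration helpers also graduated into oslo.db. A hedged example of calling the oslo.db equivalent of the db_sync() deleted above (the engine URL and repository path are placeholders):

import sqlalchemy

from oslo.db.sqlalchemy import migration

engine = sqlalchemy.create_engine('sqlite:///mistral.sqlite')

# Upgrade to the latest migration; passing an integer instead of None
# would upgrade or downgrade to that specific version.
migration.db_sync(engine, '/path/to/migrate_repo', version=None)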
@@ -1,119 +0,0 @@
-# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# Copyright 2011 Piston Cloud Computing, Inc.
-# Copyright 2012 Cloudscaling Group, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-SQLAlchemy models.
-"""
-
-import six
-
-from sqlalchemy import Column, Integer
-from sqlalchemy import DateTime
-from sqlalchemy.orm import object_mapper
-
-from mistral.openstack.common import timeutils
-
-
-class ModelBase(six.Iterator):
-    """Base class for models."""
-    __table_initialized__ = False
-
-    def save(self, session):
-        """Save this object."""
-
-        # NOTE(boris-42): This part of code should be look like:
-        #                       session.add(self)
-        #                       session.flush()
-        #                 But there is a bug in sqlalchemy and eventlet that
-        #                 raises NoneType exception if there is no running
-        #                 transaction and rollback is called. As long as
-        #                 sqlalchemy has this bug we have to create transaction
-        #                 explicitly.
-        with session.begin(subtransactions=True):
-            session.add(self)
-            session.flush()
-
-    def __setitem__(self, key, value):
-        setattr(self, key, value)
-
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    def get(self, key, default=None):
-        return getattr(self, key, default)
-
-    @property
-    def _extra_keys(self):
-        """Specifies custom fields
-
-        Subclasses can override this property to return a list
-        of custom fields that should be included in their dict
-        representation.
-
-        For reference check tests/db/sqlalchemy/test_models.py
-        """
-        return []
-
-    def __iter__(self):
-        columns = list(dict(object_mapper(self).columns).keys())
-        # NOTE(russellb): Allow models to specify other keys that can be looked
-        # up, beyond the actual db columns. An example would be the 'name'
-        # property for an Instance.
-        columns.extend(self._extra_keys)
-        self._i = iter(columns)
-        return self
-
-    # In Python 3, __next__() has replaced next().
-    def __next__(self):
-        n = six.advance_iterator(self._i)
-        return n, getattr(self, n)
-
-    def next(self):
-        return self.__next__()
-
-    def update(self, values):
-        """Make the model object behave like a dict."""
-        for k, v in six.iteritems(values):
-            setattr(self, k, v)
-
-    def iteritems(self):
-        """Make the model object behave like a dict.
-
-        Includes attributes from joins.
-        """
-        local = dict(self)
-        joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
-                       if not k[0] == '_'])
-        local.update(joined)
-        return six.iteritems(local)
-
-
-class TimestampMixin(object):
-    created_at = Column(DateTime, default=lambda: timeutils.utcnow())
-    updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())
-
-
-class SoftDeleteMixin(object):
-    deleted_at = Column(DateTime)
-    deleted = Column(Integer, default=0)
-
-    def soft_delete(self, session):
-        """Mark this object as deleted."""
-        self.deleted = self.id
-        self.deleted_at = timeutils.utcnow()
-        self.save(session=session)
@ -1,904 +0,0 @@
|
|||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""Session Handling for SQLAlchemy backend.
|
|
||||||
|
|
||||||
Recommended ways to use sessions within this framework:
|
|
||||||
|
|
||||||
* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
|
|
||||||
`model_query()` will implicitly use a session when called without one
|
|
||||||
supplied. This is the ideal situation because it will allow queries
|
|
||||||
to be automatically retried if the database connection is interrupted.
|
|
||||||
|
|
||||||
.. note:: Automatic retry will be enabled in a future patch.
|
|
||||||
|
|
||||||
It is generally fine to issue several queries in a row like this. Even though
|
|
||||||
they may be run in separate transactions and/or separate sessions, each one
|
|
||||||
will see the data from the prior calls. If needed, undo- or rollback-like
|
|
||||||
functionality should be handled at a logical level. For an example, look at
|
|
||||||
the code around quotas and `reservation_rollback()`.
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
def get_foo(context, foo):
|
|
||||||
return (model_query(context, models.Foo).
|
|
||||||
filter_by(foo=foo).
|
|
||||||
first())
|
|
||||||
|
|
||||||
def update_foo(context, id, newfoo):
|
|
||||||
(model_query(context, models.Foo).
|
|
||||||
filter_by(id=id).
|
|
||||||
update({'foo': newfoo}))
|
|
||||||
|
|
||||||
def create_foo(context, values):
|
|
||||||
foo_ref = models.Foo()
|
|
||||||
foo_ref.update(values)
|
|
||||||
foo_ref.save()
|
|
||||||
return foo_ref
|
|
||||||
|
|
||||||
|
|
||||||
* Within the scope of a single method, keep all the reads and writes within
|
|
||||||
the context managed by a single session. In this way, the session's
|
|
||||||
`__exit__` handler will take care of calling `flush()` and `commit()` for
|
|
||||||
you. If using this approach, you should not explicitly call `flush()` or
|
|
||||||
`commit()`. Any error within the context of the session will cause the
|
|
||||||
session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
|
|
||||||
raised in `session`'s `__exit__` handler, and any try/except within the
|
|
||||||
context managed by `session` will not be triggered. And catching other
|
|
||||||
non-database errors in the session will not trigger the ROLLBACK, so
|
|
||||||
exception handlers should always be outside the session, unless the
|
|
||||||
developer wants to do a partial commit on purpose. If the connection is
|
|
||||||
dropped before this is possible, the database will implicitly roll back the
|
|
||||||
transaction.
|
|
||||||
|
|
||||||
.. note:: Statements in the session scope will not be automatically retried.
|
|
||||||
|
|
||||||
If you create models within the session, they need to be added, but you
|
|
||||||
do not need to call `model.save()`:
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
def create_many_foo(context, foos):
|
|
||||||
session = sessionmaker()
|
|
||||||
with session.begin():
|
|
||||||
for foo in foos:
|
|
||||||
foo_ref = models.Foo()
|
|
||||||
foo_ref.update(foo)
|
|
||||||
session.add(foo_ref)
|
|
||||||
|
|
||||||
def update_bar(context, foo_id, newbar):
|
|
||||||
session = sessionmaker()
|
|
||||||
with session.begin():
|
|
||||||
foo_ref = (model_query(context, models.Foo, session).
|
|
||||||
filter_by(id=foo_id).
|
|
||||||
first())
|
|
||||||
(model_query(context, models.Bar, session).
|
|
||||||
filter_by(id=foo_ref['bar_id']).
|
|
||||||
update({'bar': newbar}))
|
|
||||||
|
|
||||||
.. note:: `update_bar` is a trivially simple example of using
|
|
||||||
``with session.begin``. Whereas `create_many_foo` is a good example of
|
|
||||||
when a transaction is needed, it is always best to use as few queries as
|
|
||||||
possible.
|
|
||||||
|
|
||||||
The two queries in `update_bar` can be better expressed using a single query
|
|
||||||
which avoids the need for an explicit transaction. It can be expressed like
|
|
||||||
so:
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
def update_bar(context, foo_id, newbar):
|
|
||||||
subq = (model_query(context, models.Foo.id).
|
|
||||||
filter_by(id=foo_id).
|
|
||||||
limit(1).
|
|
||||||
subquery())
|
|
||||||
(model_query(context, models.Bar).
|
|
||||||
filter_by(id=subq.as_scalar()).
|
|
||||||
update({'bar': newbar}))
|
|
||||||
|
|
||||||
For reference, this emits approximately the following SQL statement:
|
|
||||||
|
|
||||||
.. code:: sql
|
|
||||||
|
|
||||||
UPDATE bar SET bar = ${newbar}
|
|
||||||
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
|
|
||||||
|
|
||||||
.. note:: `create_duplicate_foo` is a trivially simple example of catching an
|
|
||||||
exception while using ``with session.begin``. Here create two duplicate
|
|
||||||
instances with same primary key, must catch the exception out of context
|
|
||||||
managed by a single session:
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
def create_duplicate_foo(context):
|
|
||||||
foo1 = models.Foo()
|
|
||||||
foo2 = models.Foo()
|
|
||||||
foo1.id = foo2.id = 1
|
|
||||||
session = sessionmaker()
|
|
||||||
try:
|
|
||||||
with session.begin():
|
|
||||||
session.add(foo1)
|
|
||||||
session.add(foo2)
|
|
||||||
except exception.DBDuplicateEntry as e:
|
|
||||||
handle_error(e)
|
|
||||||
|
|
||||||
* Passing an active session between methods. Sessions should only be passed
|
|
||||||
to private methods. The private method must use a subtransaction; otherwise
|
|
||||||
SQLAlchemy will throw an error when you call `session.begin()` on an existing
|
|
||||||
transaction. Public methods should not accept a session parameter and should
|
|
||||||
not be involved in sessions within the caller's scope.
|
|
||||||
|
|
||||||
Note that this incurs more overhead in SQLAlchemy than the above means
|
|
||||||
due to nesting transactions, and it is not possible to implicitly retry
|
|
||||||
failed database operations when using this approach.
|
|
||||||
|
|
||||||
This also makes code somewhat more difficult to read and debug, because a
|
|
||||||
single database transaction spans more than one method. Error handling
|
|
||||||
becomes less clear in this situation. When this is needed for code clarity,
|
|
||||||
it should be clearly documented.
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
def myfunc(foo):
|
|
||||||
session = sessionmaker()
|
|
||||||
with session.begin():
|
|
||||||
# do some database things
|
|
||||||
bar = _private_func(foo, session)
|
|
||||||
return bar
|
|
||||||
|
|
||||||
def _private_func(foo, session=None):
|
|
||||||
if not session:
|
|
||||||
session = sessionmaker()
|
|
||||||
with session.begin(subtransaction=True):
|
|
||||||
# do some other database things
|
|
||||||
return bar
|
|
||||||
|
|
||||||
|
|
||||||
There are some things which it is best to avoid:
|
|
||||||
|
|
||||||
* Don't keep a transaction open any longer than necessary.
|
|
||||||
|
|
||||||
This means that your ``with session.begin()`` block should be as short
|
|
||||||
as possible, while still containing all the related calls for that
|
|
||||||
transaction.
|
|
||||||
|
|
||||||
* Avoid ``with_lockmode('UPDATE')`` when possible.

  In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
  any rows, it will take a gap-lock. This is a form of write-lock on the
  "gap" where no rows exist, and prevents any other writes to that space.
  This can effectively prevent any INSERT into a table by locking the gap
  at the end of the index. Similar problems will occur if the SELECT FOR
  UPDATE has an overly broad WHERE clause, or doesn't properly use an index.

  One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
  number of rows matching a query, and if only one row is returned,
  then issue the SELECT FOR UPDATE (see the sketch after this list).

  The better long-term solution is to use
  ``INSERT .. ON DUPLICATE KEY UPDATE``.
  However, this cannot be done until the "deleted" columns are removed and
  proper UNIQUE constraints are added to the tables.
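
A minimal sketch of that SELECT-then-lock idea, reusing the illustrative
`model_query`/`BarModel` names used elsewhere in this docstring (an
illustration of the workaround, not a drop-in recipe):

.. code:: python

    def careful_update(some_condition, values):
        session = sessionmaker()
        with session.begin():
            query = model_query(BarModel, session=session).find(some_condition)
            if query.count() == 1:
                # A matching row exists, so SELECT ... FOR UPDATE will lock
                # a real row rather than taking a gap-lock.
                bar_ref = query.with_lockmode('update').one()
                bar_ref.update(values)
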
Enabling soft deletes:

* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
  to your model class. For example:

  .. code:: python

      class NovaBase(models.SoftDeleteMixin, models.ModelBase):
          pass


Efficient use of soft deletes:

* There are two possible ways to mark a record as deleted:
  `model.soft_delete()` and `query.soft_delete()`.

  The `model.soft_delete()` method works with a single already-fetched entry.
  `query.soft_delete()` makes only one db request for all entries that
  correspond to the query.

* In almost all cases you should use `query.soft_delete()`. Some examples:

  .. code:: python

      def soft_delete_bar():
          count = model_query(BarModel).find(some_condition).soft_delete()
          if count == 0:
              raise Exception("0 entries were soft deleted")

      def complex_soft_delete_with_synchronization_bar(session=None):
          if session is None:
              session = sessionmaker()
          with session.begin(subtransactions=True):
              count = (model_query(BarModel).
                       find(some_condition).
                       soft_delete(synchronize_session=True))
              # Here synchronize_session is required, because we
              # don't know what is going on in the outer session.
              if count == 0:
                  raise Exception("0 entries were soft deleted")

* There is only one situation where `model.soft_delete()` is appropriate: when
  you fetch a single record, work with it, and mark it as deleted in the same
  transaction.

  .. code:: python

      def soft_delete_bar_model():
          session = sessionmaker()
          with session.begin():
              bar_ref = model_query(BarModel).find(some_condition).first()
              # Work with bar_ref
              bar_ref.soft_delete(session=session)

  However, if you need to work with all entries that correspond to the query
  and then soft-delete them, you should use the `query.soft_delete()` method:

  .. code:: python

      def soft_delete_multi_models():
          session = sessionmaker()
          with session.begin():
              query = (model_query(BarModel, session=session).
                       find(some_condition))
              model_refs = query.all()
              # Work with model_refs
              query.soft_delete(synchronize_session=False)
              # synchronize_session=False should be set if there is no outer
              # session and these entries are not used after this.

  When working with many rows, it is very important to use
  `query.soft_delete()`, which issues a single query. Using
  `model.soft_delete()`, as in the following example, is very inefficient.

  .. code:: python

      for bar_ref in bar_refs:
          bar_ref.soft_delete(session=session)
      # This will produce count(bar_refs) db requests.

"""

import functools
import logging
import re
import time

import six
from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column

from mistral.openstack.common.db import exception
from mistral.openstack.common.gettextutils import _LE, _LW
from mistral.openstack.common import timeutils


LOG = logging.getLogger(__name__)


class SqliteForeignKeysListener(PoolListener):
    """Ensures that the foreign key constraints are enforced in SQLite.

    Foreign key constraints are disabled by default in SQLite, so they
    are enabled here for every database connection.
    """
    def connect(self, dbapi_con, con_record):
        dbapi_con.execute('pragma foreign_keys=ON')


# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# sqlite since 3.7.16:
# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
#
# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
#  constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
#  constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
#  'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
#  with -' for key 'name_of_our_constraint'")
#
# ibm_db_sa:
# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
#  statement, UPDATE statement, or foreign key update caused by a
#  DELETE statement are not valid because the primary key, unique
#  constraint or unique index identified by "2" constrains table
#  "NOVA.KEY_PAIRS" from having duplicate values for the index
#  key.
_DUP_KEY_RE_DB = {
    "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
               re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
    "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
    "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
    "ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
}


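# For illustration: per the note above, a modern sqlite message like
#   "(IntegrityError) UNIQUE constraint failed: users.name"
# is matched by the second sqlite pattern with group(1) == "users.name".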
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
    """Raise exception if two entries are duplicated.

    A DBDuplicateEntry exception is raised if the integrity error wraps
    a unique constraint violation.
    """

    def get_columns_from_uniq_cons_or_name(columns):
        # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
        # where `t` is the table name and `c1`, `c2` are the columns in
        # the UniqueConstraint.
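        # For example, an (illustrative) name "uniq_foo0bar0baz" yields
        # the column list ["bar", "baz"].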
        uniqbase = "uniq_"
        if not columns.startswith(uniqbase):
            if engine_name == "postgresql":
                return [columns[columns.index("_") + 1:columns.rindex("_")]]
            return [columns]
        return columns[len(uniqbase):].split("0")[1:]

    if engine_name not in ("ibm_db_sa", "mysql", "sqlite", "postgresql"):
        return

    # FIXME(johannes): The usage of the .message attribute has been
    # deprecated since Python 2.6. However, the exceptions raised by
    # SQLAlchemy can differ when using unicode() and accessing .message.
    # An audit across all three supported engines will be necessary to
    # ensure there are no regressions.
    for pattern in _DUP_KEY_RE_DB[engine_name]:
        match = pattern.match(integrity_error.message)
        if match:
            break
    else:
        return

    # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
    # columns so we have to omit that from the DBDuplicateEntry error.
    columns = ''

    if engine_name != 'ibm_db_sa':
        columns = match.group(1)

    if engine_name == "sqlite":
        columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
    else:
        columns = get_columns_from_uniq_cons_or_name(columns)
    raise exception.DBDuplicateEntry(columns, integrity_error)


# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
#                     'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
    "mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}


def _raise_if_deadlock_error(operational_error, engine_name):
    """Raise exception on deadlock condition.

    Raise DBDeadlock exception if OperationalError contains a Deadlock
    condition.
    """
    re = _DEADLOCK_RE_DB.get(engine_name)
    if re is None:
        return
    # FIXME(johannes): The usage of the .message attribute has been
    # deprecated since Python 2.6. However, the exceptions raised by
    # SQLAlchemy can differ when using unicode() and accessing .message.
    # An audit across all three supported engines will be necessary to
    # ensure there are no regressions.
    m = re.match(operational_error.message)
    if not m:
        return
    raise exception.DBDeadlock(operational_error)


def _wrap_db_error(f):
    @functools.wraps(f)
    def _wrap(self, *args, **kwargs):
        try:
            assert issubclass(
                self.__class__, sqlalchemy.orm.session.Session
            ), ('_wrap_db_error() can only be applied to methods of '
                'subclasses of sqlalchemy.orm.session.Session.')

            return f(self, *args, **kwargs)
        except UnicodeEncodeError:
            raise exception.DBInvalidUnicodeParameter()
        except sqla_exc.OperationalError as e:
            _raise_if_db_connection_lost(e, self.bind)
            _raise_if_deadlock_error(e, self.bind.dialect.name)
            # NOTE(comstud): A lot of code is checking for OperationalError
            # so let's not wrap it for now.
            raise
        # note(boris-42): We should catch unique constraint violations and
        # wrap them in our own DBDuplicateEntry exception. A unique
        # constraint violation is wrapped by IntegrityError.
        except sqla_exc.IntegrityError as e:
            # note(boris-42): SQLAlchemy doesn't unify errors from different
            # DBs, so we must do this. Also, some tables (for example
            # instance_types) have more than one unique constraint. This
            # means we must extract the names of the columns whose values
            # violate the unique constraint from the error message.
            _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
            raise exception.DBError(e)
        except Exception as e:
            LOG.exception(_LE('DB exception wrapped.'))
            raise exception.DBError(e)
    return _wrap


def _synchronous_switch_listener(dbapi_conn, connection_rec):
    """Switch sqlite connections to non-synchronous mode."""
    dbapi_conn.execute("PRAGMA synchronous = OFF")


def _add_regexp_listener(dbapi_con, con_record):
    """Add REGEXP function to sqlite connections."""

    def regexp(expr, item):
        reg = re.compile(expr)
        return reg.search(six.text_type(item)) is not None
    # SQLite's REGEXP operator delegates to a user-defined regexp()
    # function, so registering one enables queries such as
    #   SELECT * FROM foo WHERE name REGEXP '^ba.*';
    dbapi_con.create_function('regexp', 2, regexp)


def _thread_yield(dbapi_con, con_record):
    """Ensure other greenthreads get a chance to be executed.

    If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
    execute instead of time.sleep(0).
    Force a context switch. With common database backends (e.g. MySQLdb and
    sqlite), there is no implicit yield caused by network I/O since they are
    implemented by C libraries that eventlet cannot monkey patch.
    """
    time.sleep(0)


def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
    """Ensures that MySQL, PostgreSQL or DB2 connections are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
    """
    cursor = dbapi_conn.cursor()
    try:
        ping_sql = 'select 1'
        if engine.name == 'ibm_db_sa':
            # DB2 requires a table expression
            ping_sql = 'select 1 from (values (1)) AS t1'
        cursor.execute(ping_sql)
    except Exception as ex:
        if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
            msg = _LW('Database server has gone away: %s') % ex
            LOG.warning(msg)

            # if the database server has gone away, all connections in the
            # pool have become invalid and we can safely close all of them
            # here, rather than waste time checking every single connection
            engine.dispose()

            # this will be handled by SQLAlchemy and will force it to create
            # a new connection and retry the original action
            raise sqla_exc.DisconnectionError(msg)
        else:
            raise


def _set_session_sql_mode(dbapi_con, connection_rec, sql_mode=None):
    """Set the sql_mode session variable.

    MySQL supports several server modes. The default is None, but sessions
    may choose to enable server modes like TRADITIONAL, ANSI,
    several STRICT_* modes and others.

    Note: passing in '' (empty string) for sql_mode clears
    the SQL mode for the session, overriding a potentially set
    server default.
    """

    cursor = dbapi_con.cursor()
    cursor.execute("SET SESSION sql_mode = %s", [sql_mode])


def _mysql_get_effective_sql_mode(engine):
    """Returns the effective SQL mode for connections from the engine pool.

    Returns ``None`` if the mode isn't available, otherwise returns the mode.
    """
    # Get the real effective SQL mode. Even when unset by
    # our own config, the server may still be operating in a specific
    # SQL mode as set by the server configuration.
    # Also note that the checkout listener will be called on execute to
    # set the mode if it's registered.
    row = engine.execute("SHOW VARIABLES LIKE 'sql_mode'").fetchone()
    if row is None:
        return
    return row[1]


def _mysql_check_effective_sql_mode(engine):
    """Logs a message based on the effective SQL mode for MySQL connections."""
    realmode = _mysql_get_effective_sql_mode(engine)

    if realmode is None:
        LOG.warning(_LW('Unable to detect effective SQL mode'))
        return

    LOG.debug('MySQL server mode set to %s', realmode)
    # 'TRADITIONAL' mode enables several other modes, so
    # we need a substring match here
    if not ('TRADITIONAL' in realmode.upper() or
            'STRICT_ALL_TABLES' in realmode.upper()):
        LOG.warning(_LW("MySQL SQL mode is '%s', "
                        "consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
                    realmode)


def _mysql_set_mode_callback(engine, sql_mode):
    if sql_mode is not None:
        mode_callback = functools.partial(_set_session_sql_mode,
                                          sql_mode=sql_mode)
        sqlalchemy.event.listen(engine, 'connect', mode_callback)
    _mysql_check_effective_sql_mode(engine)


def _is_db_connection_error(args):
    """Return True if the error was caused by a database connection failure."""
    # NOTE(adam_g): This is currently MySQL specific and needs to be extended
    # to support Postgres and others.
    # For DB2, the error code is -30081 when the database is not ready.
    conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')
    for err_code in conn_err_codes:
        if args.find(err_code) != -1:
            return True
    return False


def _raise_if_db_connection_lost(error, engine):
    # NOTE(vsergeyev): The function is_disconnect(e, connection, cursor)
    # requires a connection and a cursor as incoming parameters, but we
    # have no way to create a connection if the DB is not available, so
    # in that case the reconnect fails. However, is_disconnect() ignores
    # these parameters, so it makes sense to pass None as placeholders
    # for the connection and cursor.
    if engine.dialect.is_disconnect(error, None, None):
        raise exception.DBConnectionError(error)


def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
                  idle_timeout=3600,
                  connection_debug=0, max_pool_size=None, max_overflow=None,
                  pool_timeout=None, sqlite_synchronous=True,
                  connection_trace=False, max_retries=10, retry_interval=10):
    """Return a new SQLAlchemy engine."""

    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)

    engine_args = {
        "pool_recycle": idle_timeout,
        'convert_unicode': True,
    }

    logger = logging.getLogger('sqlalchemy.engine')

    # Map SQL debug level to Python log level
    if connection_debug >= 100:
        logger.setLevel(logging.DEBUG)
    elif connection_debug >= 50:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.WARNING)

    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool

        if sql_connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        if max_pool_size is not None:
            engine_args['pool_size'] = max_pool_size
        if max_overflow is not None:
            engine_args['max_overflow'] = max_overflow
        if pool_timeout is not None:
            engine_args['pool_timeout'] = pool_timeout

    engine = sqlalchemy.create_engine(sql_connection, **engine_args)

    sqlalchemy.event.listen(engine, 'checkin', _thread_yield)

    if engine.name in ('ibm_db_sa', 'mysql', 'postgresql'):
        ping_callback = functools.partial(_ping_listener, engine)
        sqlalchemy.event.listen(engine, 'checkout', ping_callback)
        if engine.name == 'mysql':
            if mysql_sql_mode:
                _mysql_set_mode_callback(engine, mysql_sql_mode)
    elif 'sqlite' in connection_dict.drivername:
        if not sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

    if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
        _patch_mysqldb_with_stacktrace_comments()

    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        if not _is_db_connection_error(e.args[0]):
            raise

        remaining = max_retries
        if remaining == -1:
            remaining = 'infinite'
        while True:
            msg = _LW('SQL connection failed. %s attempts left.')
            LOG.warning(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                if (remaining != 'infinite' and remaining == 0) or \
                        not _is_db_connection_error(e.args[0]):
                    raise
    return engine


class Query(sqlalchemy.orm.query.Query):
    """Subclass of sqlalchemy.query with soft_delete() method."""
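    # Note: the update below performs the soft delete in a single UPDATE
    # statement: 'deleted' is set to the row id (rather than a boolean,
    # which presumably keeps unique constraints that include the 'deleted'
    # column usable), 'deleted_at' is stamped with the current time, and
    # 'updated_at' is deliberately left unchanged.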
    def soft_delete(self, synchronize_session='evaluate'):
        return self.update({'deleted': literal_column('id'),
                            'updated_at': literal_column('updated_at'),
                            'deleted_at': timeutils.utcnow()},
                           synchronize_session=synchronize_session)


class Session(sqlalchemy.orm.session.Session):
    """Custom Session class to avoid SqlAlchemy Session monkey patching."""
    @_wrap_db_error
    def query(self, *args, **kwargs):
        return super(Session, self).query(*args, **kwargs)

    @_wrap_db_error
    def flush(self, *args, **kwargs):
        return super(Session, self).flush(*args, **kwargs)

    @_wrap_db_error
    def execute(self, *args, **kwargs):
        return super(Session, self).execute(*args, **kwargs)


def get_maker(engine, autocommit=True, expire_on_commit=False):
    """Return a SQLAlchemy sessionmaker using the given engine."""
    return sqlalchemy.orm.sessionmaker(bind=engine,
                                       class_=Session,
                                       autocommit=autocommit,
                                       expire_on_commit=expire_on_commit,
                                       query_cls=Query)


def _patch_mysqldb_with_stacktrace_comments():
    """Adds current stack trace as a comment in queries.

    Patches MySQLdb.cursors.BaseCursor._do_query.
    """
    import MySQLdb.cursors
    import traceback

    old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query

    def _do_query(self, q):
        stack = ''
        for filename, line, method, function in traceback.extract_stack():
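            # traceback.extract_stack() yields (filename, line number,
            # function name, text) tuples, so 'method' here is actually the
            # function name and 'function' is the source line text.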
            # exclude various common things from trace
            if filename.endswith('session.py') and method == '_do_query':
                continue
            if filename.endswith('api.py') and method == 'wrapper':
                continue
            if filename.endswith('utils.py') and method == '_inner':
                continue
            if filename.endswith('exception.py') and method == '_wrap':
                continue
            # db/api is just a wrapper around db/sqlalchemy/api
            if filename.endswith('db/api.py'):
                continue
            # only trace inside mistral
            index = filename.rfind('mistral')
            if index == -1:
                continue
            stack += "File:%s:%s Method:%s() Line:%s | " \
                     % (filename[index:], line, method, function)

        # strip trailing " | " from stack
        if stack:
            stack = stack[:-3]
            qq = "%s /* %s */" % (q, stack)
        else:
            qq = q
        old_mysql_do_query(self, qq)

    setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)


class EngineFacade(object):
    """A helper class for removing global engine instances from mistral.db.

    As a library, mistral.db can't decide where to store/when to create engine
    and sessionmaker instances, so this must be left for a target application.

    On the other hand, in order to simplify the adoption of mistral.db changes,
    we'll provide a helper class, which creates engine and sessionmaker
    on its instantiation and provides get_engine()/get_session() methods
    that are compatible with corresponding utility functions that currently
    exist in target projects, e.g. in Nova.

    engine/sessionmaker instances will still be global (and they are meant to
    be global), but they will be stored in the app context, rather than in the
    mistral.db context.

    Note: using this helper is completely optional and you are encouraged to
    integrate engine/sessionmaker instances into your apps any way you like
    (e.g. one might want to bind a session to a request context). Two important
    things to remember:

    1. An Engine instance is effectively a pool of DB connections, so it's
       meant to be shared (and it's thread-safe).
    2. A Session instance is not meant to be shared and represents a DB
       transactional context (i.e. it's not thread-safe). sessionmaker is
       a factory of sessions.

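    A minimal usage sketch (the in-memory SQLite URL is illustrative):

        facade = EngineFacade('sqlite://')
        engine = facade.get_engine()
        session = facade.get_session()
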
"""
|
|
||||||
|
|
||||||
    def __init__(self, sql_connection,
                 sqlite_fk=False, autocommit=True,
                 expire_on_commit=False, **kwargs):
        """Initialize engine and sessionmaker instances.

        :param sqlite_fk: enable foreign keys in SQLite
        :type sqlite_fk: bool

        :param autocommit: use autocommit mode for created Session instances
        :type autocommit: bool

        :param expire_on_commit: expire session objects on commit
        :type expire_on_commit: bool

        Keyword arguments:

        :keyword mysql_sql_mode: the SQL mode to be used for MySQL sessions.
                                 (defaults to TRADITIONAL)
        :keyword idle_timeout: timeout before idle sql connections are reaped
                               (defaults to 3600)
        :keyword connection_debug: verbosity of SQL debugging information.
                                   0=None, 100=Everything (defaults to 0)
        :keyword max_pool_size: maximum number of SQL connections to keep open
                                in a pool (defaults to SQLAlchemy settings)
        :keyword max_overflow: if set, use this value for max_overflow with
                               sqlalchemy (defaults to SQLAlchemy settings)
        :keyword pool_timeout: if set, use this value for pool_timeout with
                               sqlalchemy (defaults to SQLAlchemy settings)
        :keyword sqlite_synchronous: if True, SQLite uses synchronous mode
                                     (defaults to True)
        :keyword connection_trace: add python stack traces to SQL as comment
                                   strings (defaults to False)
        :keyword max_retries: maximum db connection retries during startup.
                              (setting -1 implies an infinite retry count)
                              (defaults to 10)
        :keyword retry_interval: interval between retries of opening a sql
                                 connection (defaults to 10)

        """

        super(EngineFacade, self).__init__()

        self._engine = create_engine(
            sql_connection=sql_connection,
            sqlite_fk=sqlite_fk,
            mysql_sql_mode=kwargs.get('mysql_sql_mode', 'TRADITIONAL'),
            idle_timeout=kwargs.get('idle_timeout', 3600),
            connection_debug=kwargs.get('connection_debug', 0),
            max_pool_size=kwargs.get('max_pool_size'),
            max_overflow=kwargs.get('max_overflow'),
            pool_timeout=kwargs.get('pool_timeout'),
            sqlite_synchronous=kwargs.get('sqlite_synchronous', True),
            connection_trace=kwargs.get('connection_trace', False),
            max_retries=kwargs.get('max_retries', 10),
            retry_interval=kwargs.get('retry_interval', 10))
        self._session_maker = get_maker(
            engine=self._engine,
            autocommit=autocommit,
            expire_on_commit=expire_on_commit)

    def get_engine(self):
        """Get the engine instance (note, that it's shared)."""

        return self._engine

    def get_session(self, **kwargs):
        """Get a Session instance.

        If passed, keyword argument values override the ones used when the
        sessionmaker instance was created.

        :keyword autocommit: use autocommit mode for created Session instances
        :type autocommit: bool

        :keyword expire_on_commit: expire session objects on commit
        :type expire_on_commit: bool

        """

        # Iterate over a copy of the keys so unknown arguments can be
        # deleted safely while iterating.
        for arg in list(kwargs):
            if arg not in ('autocommit', 'expire_on_commit'):
                del kwargs[arg]

        return self._session_maker(**kwargs)

    @classmethod
    def from_config(cls, connection_string, conf,
                    sqlite_fk=False, autocommit=True, expire_on_commit=False):
        """Initialize EngineFacade using oslo.config config instance options.

        :param connection_string: SQLAlchemy connection string
        :type connection_string: string

        :param conf: oslo.config config instance
        :type conf: oslo.config.cfg.ConfigOpts

        :param sqlite_fk: enable foreign keys in SQLite
        :type sqlite_fk: bool

        :param autocommit: use autocommit mode for created Session instances
        :type autocommit: bool

        :param expire_on_commit: expire session objects on commit
        :type expire_on_commit: bool

        """

        return cls(sql_connection=connection_string,
                   sqlite_fk=sqlite_fk,
                   autocommit=autocommit,
                   expire_on_commit=expire_on_commit,
                   **dict(conf.database.items()))

@@ -2,10 +2,6 @@
 # The list of modules to copy from oslo-incubator.git
 module=config.generator
-module=db
-module=db.sqlalchemy.models
-module=db.sqlalchemy.session
-module=db.sqlalchemy.migration
 module=log
 module=jsonutils
 module=lockutils

@@ -12,6 +12,7 @@ croniter>=0.3.4 # MIT License
 requests>=1.1
 kombu>=2.4.8
 oslo.config>=1.2.1
+oslo.db>=0.2.0 # Apache-2.0
 oslo.messaging>=1.3.0
 oslotest
 paramiko>=1.13.0

@@ -1,2 +1,2 @@
-export MISTRAL_CONFIG_GENERATOR_EXTRA_LIBRARIES=oslo.messaging
+export MISTRAL_CONFIG_GENERATOR_EXTRA_LIBRARIES="oslo.messaging oslo.db"
 export MISTRAL_CONFIG_GENERATOR_EXTRA_MODULES=keystoneclient.middleware.auth_token