Deprecate sysinv.openstack.common.db in favor of oslo_db

sysinv.openstack.common.db was no longer used by anything except
the unit tests. The sysinv engine had previously been converted to
oslo_db, so the changes are primarily in the unit test environment.

Story: 2006796
Task: 37426
Change-Id: Ie638ee7e347fef0ada061ed4047decd0cbb919ef
Signed-off-by: Al Bailey <Al.Bailey@windriver.com>
Al Bailey 2020-02-05 09:38:42 -06:00 committed by albailey
parent 2a6ecd8a4a
commit 4598ca8d65
17 changed files with 98 additions and 1015 deletions
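At a glance, the conversion is an import swap: every consumer moves from the incubator copy under sysinv.openstack.common.db to the maintained library. A minimal before/after sketch of the pattern, using module paths taken from the diffs below:

    # before: oslo-incubator code carried inside the sysinv tree
    from sysinv.openstack.common.db.exception import DBDuplicateEntry
    from sysinv.openstack.common.db.sqlalchemy import session as db_session

    # after: the maintained oslo_db library
    from oslo_db.exception import DBDuplicateEntry
    from oslo_db import options as db_options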

View File

@@ -22,5 +22,4 @@ graft etc
 include sysinv/db/sqlalchemy/migrate_repo/migrate.cfg
 include sysinv/openstack/common/config/generator.py
 include sysinv/tests/policy.json
-include sysinv/tests/db/sqlalchemy/test_migrations.conf
 graft tools

View File

@@ -97,7 +97,7 @@
 #
-# Options defined in sysinv.openstack.common.db.sqlalchemy.session
+# Options defined in oslo_db
 #
 # the filename to use with sqlite (string value)
@@ -467,7 +467,7 @@
 #
-# Options defined in sysinv.openstack.common.db.api
+# Options defined in oslo_db
 #
 # The backend to use for db (string value)
@@ -479,12 +479,12 @@
 #
-# Options defined in sysinv.openstack.common.db.sqlalchemy.session
+# Options defined in oslo_db
 #
 # The SQLAlchemy connection string used to connect to the
 # database (string value)
-#connection=sqlite:////sysinv.openstack.common/db/$sqlite_db
+#connection=sqlite:////oslo_db/$sqlite_db
 # timeout before idle sql connections are reaped (integer
 # value)

View File

@@ -1,8 +1,6 @@
 [DEFAULT]
 module=config.generator
 module=context
-module=db
-module=db.sqlalchemy
 module=flakes
 module=install_venv_common
 module=local

View File

@@ -17,6 +17,8 @@ import wsme
 from wsme import types as wtypes
 import wsmeext.pecan as wsme_pecan
+from oslo_db.exception import DBDuplicateEntry
+from oslo_db.exception import DBError
 from oslo_log import log
 from sysinv._i18n import _
 from sysinv.api.controllers.v1 import base
@@ -27,8 +29,6 @@ from sysinv.api.controllers.v1 import utils as api_utils
 from sysinv.common import exception
 from sysinv.common import utils as cutils
 from sysinv import objects
-from sysinv.openstack.common.db.exception import DBDuplicateEntry
-from sysinv.openstack.common.db.exception import DBError
 LOG = log.getLogger(__name__)
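The exception names are identical in oslo_db, so the handlers in this controller stay as they are; only the import moves. A hedged sketch of a typical call site (the create() call and the message are placeholders, not code from this file):

    try:
        new_object.create()
    except DBDuplicateEntry:
        raise wsme.exc.ClientSideError(_("An entry with this name already exists"))
    except DBError:
        LOG.exception("Unexpected database error")
        raise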

View File

@@ -56,7 +56,7 @@ from sysinv.common import utils as cutils
 import xml.etree.ElementTree as et
 from lxml import etree
 from sysinv.api.controllers.v1 import profile_utils
-from sysinv.openstack.common.db import exception as dbException
+from oslo_db import exception as dbException
 from wsme import types as wtypes
 from sysinv.common.storage_backend_conf import StorageBackendConfig

View File

@@ -20,16 +20,16 @@
 from oslo_config import cfg
 from sysinv.common import paths
-from sysinv.openstack.common.db.sqlalchemy import session as db_session
+from oslo_db import options as db_options
 from sysinv.openstack.common import rpc
 from sysinv import version
-_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('$sqlite_db')
+_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('sysinv.sqlite')
+db_options.set_defaults(cfg.CONF, connection=_DEFAULT_SQL_CONNECTION)
 def parse_args(argv, default_config_files=None):
-    db_session.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
-                            sqlite_db='sysinv.sqlite')
     rpc.set_defaults(control_exchange='sysinv')
     cfg.CONF(argv[1:],
              project='sysinv',
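oslo_db registers and defaults the whole [database] option group in one call, which is why the per-module set_defaults in parse_args can go away. A small standalone sketch of what the replacement provides (option names come from oslo_db; the connection string is just an example):

    from oslo_config import cfg
    from oslo_db import options as db_options

    conf = cfg.ConfigOpts()
    db_options.set_defaults(conf, connection='sqlite:///sysinv.sqlite')
    print(conf.database.connection)     # sqlite:///sysinv.sqlite
    print(conf.database.max_pool_size)  # pool options now come from oslo_db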

View File

@@ -33,9 +33,9 @@ from oslo_log import log
 LOG = log.getLogger(__name__)
 _BACKEND_MAPPING = {'sqlalchemy': 'sysinv.db.sqlalchemy.api'}
-IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING,
+IMPL = db_api.DBAPI.from_config(cfg.CONF,
+                                backend_mapping=_BACKEND_MAPPING,
                                 lazy=True)
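DBAPI.from_config resolves the backend named by [database]/backend (default 'sqlalchemy') through backend_mapping; with lazy=True the backend module is only imported on first use instead of at import time. A hedged sketch of the mechanism:

    from oslo_config import cfg
    from oslo_db import api as db_api
    from oslo_db import options as db_options

    conf = cfg.ConfigOpts()
    db_options.set_defaults(conf)  # registers [database]/backend
    impl = db_api.DBAPI.from_config(
        conf, backend_mapping={'sqlalchemy': 'sysinv.db.sqlalchemy.api'},
        lazy=True)
    # nothing is imported yet; the first attribute access loads
    # sysinv.db.sqlalchemy.api and forwards the call to it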

View File

@@ -51,9 +51,6 @@ from sysinv.db import api
 from sysinv.db.sqlalchemy import models
 CONF = cfg.CONF
-CONF.import_opt('connection',
-                'sysinv.openstack.common.db.sqlalchemy.session',
-                group='database')
 CONF.import_opt('journal_min_size',
                 'sysinv.api.controllers.v1.storage',
                 group='journal')

View File

@@ -30,7 +30,9 @@ from migrate.versioning.repository import Repository
 _REPOSITORY = None
-get_engine = enginefacade.get_legacy_facade().get_engine
+def get_engine():
+    return enginefacade.get_legacy_facade().get_engine()
 def db_sync(version=None):
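The removed attribute form built the legacy facade, and with it the engine, at import time; the function form defers that work until migrations actually run, after oslo.config has been parsed. Callers are unaffected, roughly:

    engine = get_engine()    # facade and pool are created here, on demand
    with engine.connect() as conn:
        pass                 # run migration queries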

View File

@@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudscaling Group, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@@ -1,57 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""DB related custom exceptions."""
from sysinv._i18n import _
class DBError(Exception):
"""Wraps an implementation specific exception."""
def __init__(self, inner_exception=None):
self.inner_exception = inner_exception
super(DBError, self).__init__(str(inner_exception))
class DBDuplicateEntry(DBError):
"""Wraps an implementation specific exception."""
def __init__(self, columns=None, inner_exception=None):
if columns is None:
self.columns = []
else:
self.columns = columns
super(DBDuplicateEntry, self).__init__(inner_exception)
class DBDeadlock(DBError):
def __init__(self, inner_exception=None):
super(DBDeadlock, self).__init__(inner_exception)
class DBInvalidUnicodeParameter(Exception):
message = _("Invalid Parameter: "
"Unicode is not supported by the current database.")
class DbMigrationError(DBError):
"""Wraps migration specific exception."""
def __init__(self, message=None):
super(DbMigrationError, self).__init__(str(message))
class DBConnectionError(DBError):
"""Wraps connection specific exception."""
pass
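Every class deleted here has a same-named counterpart in oslo_db (the one rename: DbMigrationError maps onto oslo_db's DBMigrationError), which is what makes this a drop-in removal. A hedged import sketch of the replacements:

    from oslo_db.exception import (DBConnectionError, DBDeadlock,
                                   DBDuplicateEntry, DBError,
                                   DBInvalidUnicodeParameter)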

View File

@@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudscaling Group, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@@ -1,720 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend.
Initializing:
* Call set_defaults with the minimal of the following kwargs:
sql_connection, sqlite_db
Example:
session.set_defaults(
sql_connection="sqlite:///var/lib/sysinv.sqlite.db",
sqlite_db="/var/lib/sysinv/sqlite.db")
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
model_query() will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
Note: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and reservation_rollback().
Examples:
def get_foo(context, foo):
return model_query(context, models.Foo).\
filter_by(foo=foo).\
first()
def update_foo(context, id, newfoo):
model_query(context, models.Foo).\
filter_by(id=id).\
update({'foo': newfoo})
def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref
* Within the scope of a single method, keeping all the reads and writes within
the context managed by a single session. In this way, the session's __exit__
handler will take care of calling flush() and commit() for you.
If using this approach, you should not explicitly call flush() or commit().
Any error within the context of the session will cause the session to emit
a ROLLBACK. If the connection is dropped before this is possible, the
database will implicitly rollback the transaction.
Note: statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call model.save()
def create_many_foo(context, foos):
session = get_session()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = get_session()
with session.begin():
foo_ref = model_query(context, models.Foo, session).\
filter_by(id=foo_id).\
first()
model_query(context, models.Bar, session).\
filter_by(id=foo_ref['bar_id']).\
update({'bar': newbar})
Note: update_bar is a trivially simple example of using "with session.begin".
Whereas create_many_foo is a good example of when a transaction is needed,
it is always best to use as few queries as possible. The two queries in
update_bar can be better expressed using a single query which avoids
the need for an explicit transaction. It can be expressed like so:
def update_bar(context, foo_id, newbar):
subq = model_query(context, models.Foo.id).\
filter_by(id=foo_id).\
limit(1).\
subquery()
model_query(context, models.Bar).\
filter_by(id=subq.as_scalar()).\
update({'bar': newbar})
For reference, this emits approximately the following SQL statement:
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call session.begin() on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
Note that this incurs more overhead in SQLAlchemy than the above means
due to nesting transactions, and it is not possible to implicitly retry
failed database operations when using this approach.
This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
def myfunc(foo):
session = get_session()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar
def _private_func(foo, session=None):
if not session:
session = get_session()
with session.begin(subtransaction=True):
# do some other database things
return bar
There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your "with session.begin()" block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid "with_lockmode('UPDATE')" when possible.
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
Enabling soft deletes:
* To use/enable soft-deletes, the SoftDeleteMixin must be added
to your model class. For example:
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:
model.soft_delete() and query.soft_delete().
model.soft_delete() method works with single already fetched entry.
query.soft_delete() makes only one db request for all entries that correspond
to query.
* In almost all cases you should use query.soft_delete(). Some examples:
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
if count == 0:
raise Exception("0 entries were soft deleted")
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = get_session()
with session.begin(subtransactions=True):
count = model_query(BarModel).\
find(some_condition).\
soft_delete(synchronize_session=True)
# Here synchronize_session is required, because we
# don't know what is going on in outer session.
if count == 0:
raise Exception("0 entries were soft deleted")
* There is only one situation where model.soft_delete() is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
def soft_delete_bar_model():
session = get_session()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to query and
then soft delete them you should use query.soft_delete() method:
def soft_delete_multi_models():
session = get_session()
with session.begin():
query = model_query(BarModel, session=session).\
find(some_condition)
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
# synchronize_session=False should be set if there is no outer
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using model.soft_delete(), as in the following
example, is very inefficient.
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import os.path
import re
import time
import eventlet
from eventlet import greenthread
from eventlet.green import threading
from oslo_config import cfg
import six
from sqlalchemy import exc as sqla_exc
import sqlalchemy.interfaces
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from oslo_log import log as logging
from oslo_utils import timeutils
from sysinv._i18n import _
from sysinv.openstack.common.db import exception
DEFAULT = 'DEFAULT'
sqlite_db_opts = [
cfg.StrOpt('sqlite_db',
default='sysinv.sqlite',
help='the filename to use with sqlite'),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help='If true, use synchronous mode for sqlite'),
]
database_opts = [
cfg.StrOpt('connection',
default='sqlite:///' +
os.path.abspath(os.path.join(os.path.dirname(__file__),
'../', '$sqlite_db')),
help='The SQLAlchemy connection string used to connect to the '
'database',
deprecated_name='sql_connection',
deprecated_group=DEFAULT,
secret=True),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_name='sql_idle_timeout',
deprecated_group=DEFAULT,
help='timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_name='sql_min_pool_size',
deprecated_group=DEFAULT,
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=50,
deprecated_name='sql_max_pool_size',
deprecated_group=DEFAULT,
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_name='sql_max_retries',
deprecated_group=DEFAULT,
help='maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_name='sql_retry_interval',
deprecated_group=DEFAULT,
help='interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=100,
deprecated_name='sql_max_overflow',
deprecated_group=DEFAULT,
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_name='sql_connection_debug',
deprecated_group=DEFAULT,
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_name='sql_connection_trace',
deprecated_group=DEFAULT,
help='Add python stack traces to SQL as comment strings'),
]
CONF = cfg.CONF
CONF.register_opts(sqlite_db_opts)
LOG = logging.getLogger(__name__)
if not hasattr(CONF.database, 'connection'):
CONF.register_opts(database_opts, 'database')
_ENGINE = None
_MAKER = None
def set_defaults(sql_connection, sqlite_db):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection)
cfg.set_defaults(sqlite_db_opts,
sqlite_db=sqlite_db)
def cleanup():
global _ENGINE, _MAKER
if _MAKER:
_MAKER.close_all() # pylint: disable=no-member
_MAKER = None
if _ENGINE:
_ENGINE.dispose()
_ENGINE = None
class SqliteForeignKeysListener(PoolListener):
"""
Ensures that the foreign key constraints are enforced in SQLite.
The foreign key constraints are disabled by default in SQLite,
so the foreign key constraints will be enabled here for every
database connection
"""
def connect(self, dbapi_con, con_record):
dbapi_con.execute('pragma foreign_keys=ON')
def get_session(autocommit=True, expire_on_commit=False,
sqlite_fk=False):
"""Return a greenthread scoped SQLAlchemy session."""
if _ENGINE is None:
engine = get_engine(sqlite_fk=sqlite_fk)
engine = _ENGINE
scoped_session = get_maker(engine, autocommit, expire_on_commit)
LOG.debug("get_session scoped_session=%s" % (scoped_session))
return scoped_session
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
# constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
# constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
_DUP_KEY_RE_DB = {
"sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
"postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
"mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
}
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
"""
In this function will be raised DBDuplicateEntry exception if integrity
error wrap unique constraint violation.
"""
def get_columns_from_uniq_cons_or_name(columns):
# note(boris-42): UniqueConstraint name convention: "uniq_c1_x_c2_x_c3"
# means that columns c1, c2, c3 are in UniqueConstraint.
uniqbase = "uniq_"
if not columns.startswith(uniqbase):
if engine_name == "postgresql":
return [columns[columns.index("_") + 1:columns.rindex("_")]]
return [columns]
return columns[len(uniqbase):].split("_x_")
if engine_name not in ["mysql", "sqlite", "postgresql"]:
return
m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message)
if not m:
return
columns = m.group(1)
if engine_name == "sqlite":
columns = columns.strip().split(", ")
else:
columns = get_columns_from_uniq_cons_or_name(columns)
raise exception.DBDuplicateEntry(columns, integrity_error)
# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
# 'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}
def _raise_if_deadlock_error(operational_error, engine_name):
"""
Raise DBDeadlock exception if OperationalError contains a Deadlock
condition.
"""
re = _DEADLOCK_RE_DB.get(engine_name)
if re is None:
return
m = re.match(operational_error.message)
if not m:
return
raise exception.DBDeadlock(operational_error)
def _wrap_db_error(f):
def _wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except UnicodeEncodeError:
raise exception.DBInvalidUnicodeParameter()
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
except sqla_exc.OperationalError as e:
_raise_if_deadlock_error(e, get_engine().name)
# NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now.
raise
except sqla_exc.IntegrityError as e:
# note(boris-42): SqlAlchemy doesn't unify errors from different
# DBs so we must do this. Also in some tables (for example
# instance_types) there are more than one unique constraint. This
# means we should get names of columns, which values violate
# unique constraint, from error message.
_raise_if_duplicate_entry_error(e, get_engine().name)
raise exception.DBError(e)
except Exception as e:
LOG.exception(_('DB exception wrapped.'))
raise exception.DBError(e)
_wrap.__name__ = f.__name__
return _wrap
def get_engine(sqlite_fk=False):
"""Return a SQLAlchemy engine."""
global _ENGINE
if _ENGINE is None:
_ENGINE = create_engine(CONF.database.connection,
sqlite_fk=sqlite_fk)
return _ENGINE
def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")
def _add_regexp_listener(dbapi_con, con_record):
"""Add REGEXP function to sqlite connections."""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
dbapi_con.create_function('regexp', 2, regexp)
def _greenthread_yield(dbapi_con, con_record):
"""
Ensure other greenthreads get a chance to execute by forcing a context
switch. With common database backends (eg MySQLdb and sqlite), there is
no implicit yield caused by network I/O since they are implemented by
C libraries that eventlet cannot monkey patch.
"""
greenthread.sleep(0)
def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
"""
Ensures that MySQL connections checked out of the
pool are alive.
Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
"""
try:
dbapi_conn.cursor().execute('select 1')
except dbapi_conn.OperationalError as ex:
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
LOG.warn(_('Got mysql server has gone away: %s'), ex)
raise sqla_exc.DisconnectionError("Database server went away")
else:
raise
def _is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
conn_err_codes = ('2002', '2003', '2006')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
def create_engine(sql_connection, sqlite_fk=False):
"""Return a new SQLAlchemy engine."""
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
"pool_recycle": CONF.database.idle_timeout,
"echo": False,
'convert_unicode': True,
}
# Map our SQL debug level to SQLAlchemy's options
if CONF.database.connection_debug >= 100:
engine_args['echo'] = 'debug'
elif CONF.database.connection_debug >= 50:
engine_args['echo'] = True
if "sqlite" in connection_dict.drivername:
if sqlite_fk:
engine_args["listeners"] = [SqliteForeignKeysListener()]
engine_args["poolclass"] = NullPool
if CONF.database.connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
else:
engine_args['pool_size'] = CONF.database.max_pool_size
if CONF.database.max_overflow is not None:
engine_args['max_overflow'] = CONF.database.max_overflow
engine = sqlalchemy.create_engine(sql_connection, **engine_args)
sqlalchemy.event.listen(engine, 'checkin', _greenthread_yield)
if 'mysql' in connection_dict.drivername:
sqlalchemy.event.listen(engine, 'checkout', _ping_listener)
elif 'sqlite' in connection_dict.drivername:
if not CONF.sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
_synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
if (CONF.database.connection_trace and
engine.dialect.dbapi.__name__ == 'MySQLdb'):
_patch_mysqldb_with_stacktrace_comments()
try:
engine.connect()
except sqla_exc.OperationalError as e:
if not _is_db_connection_error(e.args[0]):
raise
remaining = CONF.database.max_retries
if remaining == -1:
remaining = 'infinite'
while True:
msg = _('SQL connection failed. %s attempts left.')
LOG.warn(msg % remaining)
if remaining != 'infinite':
remaining -= 1
time.sleep(CONF.database.retry_interval)
try:
engine.connect()
break
except sqla_exc.OperationalError as e:
if (remaining != 'infinite' and remaining == 0) or \
not _is_db_connection_error(e.args[0]):
raise
return engine
class Query(sqlalchemy.orm.query.Query):
"""Subclass of sqlalchemy.query with soft_delete() method."""
def soft_delete(self, synchronize_session='evaluate'):
return self.update({'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()},
synchronize_session=synchronize_session)
class Session(sqlalchemy.orm.session.Session):
"""Custom Session class to avoid SqlAlchemy Session monkey patching."""
@_wrap_db_error
def query(self, *args, **kwargs):
return super(Session, self).query(*args, **kwargs)
@_wrap_db_error
def flush(self, *args, **kwargs):
return super(Session, self).flush(*args, **kwargs)
@_wrap_db_error
def execute(self, *args, **kwargs):
return super(Session, self).execute(*args, **kwargs)
def get_thread_id():
thread_id = id(eventlet.greenthread.getcurrent())
return thread_id
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
global _MAKER
if _MAKER is None:
scopefunc = get_thread_id()
_MAKER = sqlalchemy.orm.scoped_session(sqlalchemy.orm.sessionmaker(bind=engine,
class_=Session,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
query_cls=Query),
scopefunc=get_thread_id)
LOG.info("get_maker greenthread current_thread=%s session=%s "
"autocommit=%s, scopefunc=%s" %
(threading.current_thread(), _MAKER, autocommit, scopefunc))
return _MAKER
def _patch_mysqldb_with_stacktrace_comments():
"""Adds current stack trace as a comment in queries by patching
MySQLdb.cursors.BaseCursor._do_query.
"""
import MySQLdb.cursors
import traceback
old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query
def _do_query(self, q):
stack = ''
for file, line, method, function in traceback.extract_stack():
# exclude various common things from trace
if file.endswith('session.py') and method == '_do_query':
continue
if file.endswith('api.py') and method == 'wrapper':
continue
if file.endswith('utils.py') and method == '_inner':
continue
if file.endswith('exception.py') and method == '_wrap':
continue
# db/api is just a wrapper around db/sqlalchemy/api
if file.endswith('db/api.py'):
continue
# only trace inside sysinv
index = file.rfind('sysinv')
if index == -1:
continue
stack += "File:%s:%s Method:%s() Line:%s | " \
% (file[index:], line, method, function)
# strip trailing " | " from stack
if stack:
stack = stack[:-3]
qq = "%s /* %s */" % (q, stack)
else:
qq = q
old_mysql_do_query(self, qq)
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
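For reference, the parts of this module that sysinv still needed already have oslo_db equivalents; a hedged mapping, not part of this diff:

    from oslo_db.sqlalchemy import enginefacade

    facade = enginefacade.get_legacy_facade()
    engine = facade.get_engine()    # replaces session.get_engine()
    session = facade.get_session()  # replaces session.get_session()
    # error wrapping (DBDuplicateEntry and friends) and soft-delete
    # support also moved into oslo_db.sqlalchemy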

View File

@@ -1,143 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of paginate query."""
import sqlalchemy
from oslo_log import log as logging
from sysinv._i18n import _
LOG = logging.getLogger(__name__)
class InvalidSortKey(Exception):
message = _("Sort key supplied was not valid.")
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
sort_dir=None, sort_dirs=None):
"""Returns a query with sorting / pagination criteria added.
Pagination works by requiring a unique sort_key, specified by sort_keys.
(If sort_keys is not unique, then we risk looping through values.)
We use the last row in the previous page as the 'marker' for pagination.
So we must return values that follow the passed marker in the order.
With a single-valued sort_key, this would be easy: sort_key > X.
With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
the lexicographical ordering:
(k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
We also have to cope with different sort_directions.
Typically, the id of the last row is used as the client-facing pagination
marker, then the actual marker object must be fetched from the db and
passed in to us as marker.
:param query: the query object to which we should add paging/sorting
:param model: the ORM model class
:param limit: maximum number of items to return
:param sort_keys: array of attributes by which results should be sorted
:param marker: the last item of the previous page; we return the next
results after this value.
:param sort_dir: direction in which results should be sorted (asc, desc)
:param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
:rtype: sqlalchemy.orm.query.Query
:return: The query with sorting/pagination added.
"""
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id
LOG.warn(_('id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
# Default the sort direction to ascending
if sort_dirs is None and sort_dir is None:
sort_dir = 'asc'
# Ensure a per-column sort direction
if sort_dirs is None:
sort_dirs = [sort_dir for _sort_key in sort_keys]
assert(len(sort_dirs) == len(sort_keys))
# Add sorting
for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
sort_dir_func = {
'asc': sqlalchemy.asc,
'desc': sqlalchemy.desc,
}[current_sort_dir]
try:
sort_key_attr = getattr(model, current_sort_key)
except AttributeError:
LOG.error('%s is not a valid sort key' % (current_sort_key))
raise InvalidSortKey()
query = query.order_by(sort_dir_func(sort_key_attr))
# Add pagination
if marker is not None:
marker_values = []
for sort_key in sort_keys:
v = getattr(marker, sort_key)
marker_values.append(v)
# Build up an array of sort criteria as in the docstring
criteria_list = []
for i in range(0, len(sort_keys)):
crit_attrs = []
for j in range(0, i):
model_attr = getattr(model, sort_keys[j])
crit_attrs.append((model_attr == marker_values[j]))
model_attr = getattr(model, sort_keys[i])
if sort_dirs[i] == 'desc':
crit_attrs.append((model_attr < marker_values[i]))
elif sort_dirs[i] == 'asc':
crit_attrs.append((model_attr > marker_values[i]))
else:
raise ValueError(_("Unknown sort direction, "
"must be 'desc' or 'asc'"))
criteria = sqlalchemy.sql.and_(*crit_attrs)
criteria_list.append(criteria)
f = sqlalchemy.sql.or_(*criteria_list)
query = query.filter(f)
if limit is not None:
query = query.limit(limit)
return query
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
"""
metadata = sqlalchemy.MetaData()
metadata.bind = engine
return sqlalchemy.Table(name, metadata, autoload=True)
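paginate_query and get_table graduated into oslo_db with the same signatures, so callers only need a new import; a hedged sketch, assuming a query and model from sysinv.db.sqlalchemy:

    from oslo_db.sqlalchemy import utils as db_utils

    page = db_utils.paginate_query(query, models.ihost, limit=50,
                                   sort_keys=['id'], sort_dir='asc')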

View File

@@ -30,7 +30,6 @@ import copy
 import fixtures
 import mock
 import os
-import shutil
 import testtools
 from oslo_config import cfg
@@ -38,60 +37,47 @@ from oslo_db.sqlalchemy import enginefacade
 from oslo_log import log as logging
 from oslo_utils import timeutils
 from sysinv.common import paths
 from sysinv.db import api as dbapi
-from sysinv.db import migration
+from sysinv.db import migration as db_migration
+from sysinv.db.sqlalchemy import migration
 import sysinv.helm.utils
 from sysinv.objects import base as objects_base
 from sysinv.tests import conf_fixture
 from sysinv.tests import policy_fixture
-sys.modules['fm_core'] = mock.Mock()
-sys.modules['rpm'] = mock.Mock()
 CONF = cfg.CONF
 _DB_CACHE = None
+sys.modules['fm_core'] = mock.Mock()
+sys.modules['rpm'] = mock.Mock()
 class Database(fixtures.Fixture):
-    def __init__(self, engine, db_migrate, sql_connection,
-                 sqlite_db, sqlite_clean_db):
+    def __init__(self, engine, db_migrate, sql_connection):
         self.sql_connection = sql_connection
-        self.sqlite_db = sqlite_db
-        self.sqlite_clean_db = sqlite_clean_db
         self.engine = engine
         self.engine.dispose()
         conn = self.engine.connect()
-        if sql_connection == "sqlite://":
-            if db_migrate.db_version() > db_migrate.INIT_VERSION:
-                return
-        else:
-            testdb = paths.state_path_rel(sqlite_db)
-            if os.path.exists(testdb):
-                return
-        db_migrate.db_sync()
+        self.setup_sqlite(db_migrate)
         self.post_migrations()
         if sql_connection == "sqlite://":
             conn = self.engine.connect()
             self._DB = "".join(line for line in conn.connection.iterdump())
             self.engine.dispose()
-        else:
-            cleandb = paths.state_path_rel(sqlite_clean_db)
-            shutil.copyfile(testdb, cleandb)
+    def setup_sqlite(self, db_migrate):
+        if db_migrate.db_version() > db_migration.INIT_VERSION:
+            return
+        db_migrate.db_sync()
     def setUp(self):
         super(Database, self).setUp()
         if self.sql_connection == "sqlite://":
             conn = self.engine.connect()
             conn.connection.executescript(self._DB)
             self.addCleanup(self.engine.dispose)
-        else:
-            shutil.copyfile(paths.state_path_rel(self.sqlite_clean_db),
-                            paths.state_path_rel(self.sqlite_db))
     def post_migrations(self):
         """Any additional steps that are needed outside of the migrations."""
@@ -160,15 +146,10 @@ class TestCase(testtools.TestCase):
         logging.register_options(CONF)
         self.useFixture(conf_fixture.ConfFixture(CONF))
-        global _DB_CACHE
-        if not _DB_CACHE:
-            engine = enginefacade.get_legacy_facade().get_engine()
-            _DB_CACHE = Database(engine, migration,
-                                 sql_connection=CONF.database.connection,
-                                 sqlite_db='sysinv.sqlite',
-                                 sqlite_clean_db='clean.sqlite')
-        self.useFixture(_DB_CACHE)
+        # The fixture config is not set up when the _DB_CACHE below is
+        # being constructed
+        self.config(connection="sqlite://",
+                    sqlite_synchronous=False,
+                    group='database')
# NOTE(danms): Make sure to reset us back to non-remote objects
# for each test to avoid interactions. Also, backup the object
@@ -183,6 +164,13 @@
         self.policy = self.useFixture(policy_fixture.PolicyFixture())
         CONF.set_override('fatal_exception_format_errors', True)
+        global _DB_CACHE
+        if not _DB_CACHE:
+            engine = enginefacade.get_legacy_facade().get_engine()
+            _DB_CACHE = Database(engine, migration,
+                                 sql_connection=CONF.database.connection)
+        self.useFixture(_DB_CACHE)
def tearDown(self):
super(TestCase, self).tearDown()
self.helm_refresh_patcher.stop()
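The rewritten fixture leans entirely on sqlite's ability to serialize a freshly migrated in-memory database and replay it for each test, rather than copying .sqlite files around with shutil. The underlying mechanism, as a standalone sketch:

    import sqlite3

    src = sqlite3.connect(':memory:')
    src.executescript("CREATE TABLE t (id INTEGER); INSERT INTO t VALUES (1);")
    snapshot = "".join(line for line in src.iterdump())  # schema + rows as SQL

    dst = sqlite3.connect(':memory:')  # a fresh database per test
    dst.executescript(snapshot)
    print(dst.execute("SELECT id FROM t").fetchone())    # (1,)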

View File

@@ -41,7 +41,7 @@ class ConfFixture(config_fixture.Config):
         self.conf.set_default('rpc_cast_timeout', 5)
         self.conf.set_default('rpc_response_timeout', 5)
         self.conf.set_default('connection', "sqlite://", group='database')
-        self.conf.set_default('sqlite_synchronous', False)
+        self.conf.set_default('sqlite_synchronous', False, group='database')
         self.conf.set_default('use_ipv6', True)
         config.parse_args([], default_config_files=[])
         self.addCleanup(self.conf.reset)
self.addCleanup(self.conf.reset)
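ConfFixture builds on oslo.config's config_fixture.Config, so every default and override set here is reverted automatically on cleanup. A minimal standalone sketch of the same pattern (the sqlite URL mirrors the test default above):

    from oslo_config import cfg
    from oslo_config import fixture as config_fixture
    from oslo_db import options as db_options

    conf = cfg.ConfigOpts()
    db_options.set_defaults(conf)  # registers the [database] group
    fix = config_fixture.Config(conf)
    fix.setUp()                    # in a test, self.useFixture(fix) does this
    fix.config(connection='sqlite://', group='database')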

View File

@@ -0,0 +1,51 @@
#!/bin/bash
# This script allows a developer to set up their DBs for opportunistic tests.
# openstack_citest is the user/database that oslo_db looks for in
# opportunistic db tests.
# This approach is based on code in neutron/tools.
# The MYSQL_PASSWORD env variable overrides the default mysql root password.
MYSQL_PASSWORD=${MYSQL_PASSWORD:-stackdb}
function _install_mysql {
echo "Installing MySQL database"
# Set up the 'openstack_citest' user and database in postgres
tmp_dir=$(mktemp -d)
trap "rm -rf $tmp_dir" EXIT
cat << EOF > $tmp_dir/mysql.sql
DROP DATABASE IF EXISTS openstack_citest;
CREATE DATABASE openstack_citest;
CREATE USER 'openstack_citest'@'localhost' IDENTIFIED BY 'openstack_citest';
CREATE USER 'openstack_citest' IDENTIFIED BY 'openstack_citest';
GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest'@'localhost';
GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest';
FLUSH PRIVILEGES;
EOF
/usr/bin/mysql -u root -p"$MYSQL_PASSWORD" < $tmp_dir/mysql.sql
}
function _install_postgres {
echo "Installing Postgres database"
tmp_dir=$(mktemp -d)
trap "rm -rf $tmp_dir" EXIT
cat << EOF > $tmp_dir/postgresql.sql
CREATE USER openstack_citest WITH CREATEDB LOGIN PASSWORD 'openstack_citest';
CREATE DATABASE openstack_citest WITH OWNER openstack_citest;
EOF
chmod 777 $tmp_dir/postgresql.sql
sudo -u postgres /usr/bin/psql --file=$tmp_dir/postgresql.sql
}
echo "TODO: Add getopts support to select which DB you want to install"
echo "MYSQL"
_install_mysql
echo "POSTGRES"
_install_postgres
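Once the script has run, oslo_db's opportunistic test machinery finds the databases through the conventional openstack_citest credentials. A quick hedged connectivity check (the driver names are assumptions; use whichever DBAPI modules are installed):

    import sqlalchemy

    for url in ('mysql+pymysql://openstack_citest:openstack_citest@localhost/openstack_citest',
                'postgresql://openstack_citest:openstack_citest@localhost/openstack_citest'):
        try:
            sqlalchemy.create_engine(url).connect().close()
            print(url, 'OK')
        except Exception as exc:
            print(url, 'unavailable:', exc)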