Remove usage of oslo.db and oslo.config

Remove the remaining references to the oslo-incubator database layer, which
itself uses oslo.config; drop its dependencies and resync with oslo-incubator
after applying these changes.

Implements blueprint eliminate-oslo-cfg
Change-Id: Ie1ee7affae75c60833c012c921e50d7b14026101
parent 45d350e80d
commit d523d3651d
openstack-common.conf
@@ -1,11 +1,10 @@
 [DEFAULT]
 # The list of modules to copy from oslo-incubator.git
-module=db
-module=db.sqlalchemy
 module=excutils
 module=importutils
 module=install_venv_common
+module=jsonutils
 module=timeutils
 module=uuidutils
taskflow/openstack/common/db/__init__.py (file deleted)
@@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Cloudscaling Group, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
taskflow/openstack/common/db/api.py (file deleted)
@@ -1,106 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Multiple DB API backend support.

Supported configuration options:

The following two parameters are in the 'database' group:
`backend`: DB backend name or full module path to DB backend module.
`use_tpool`: Enable thread pooling of DB API calls.

A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.

*NOTE*: There are bugs in eventlet when using tpool combined with
threading locks. The python logging module happens to use such locks. To
work around this issue, be sure to specify thread=False with
eventlet.monkey_patch().

A bug for eventlet has been filed here:

https://bitbucket.org/eventlet/eventlet/issue/137/
"""
import functools

from oslo.config import cfg

from taskflow.openstack.common import importutils
from taskflow.openstack.common import lockutils


db_opts = [
    cfg.StrOpt('backend',
               default='sqlalchemy',
               deprecated_name='db_backend',
               deprecated_group='DEFAULT',
               help='The backend to use for db'),
    cfg.BoolOpt('use_tpool',
                default=False,
                deprecated_name='dbapi_use_tpool',
                deprecated_group='DEFAULT',
                help='Enable the experimental use of thread pooling for '
                     'all DB API calls')
]

CONF = cfg.CONF
CONF.register_opts(db_opts, 'database')


class DBAPI(object):
    def __init__(self, backend_mapping=None):
        if backend_mapping is None:
            backend_mapping = {}
        self.__backend = None
        self.__backend_mapping = backend_mapping

    @lockutils.synchronized('dbapi_backend', 'taskflow-')
    def __get_backend(self):
        """Get the actual backend. May be a module or an instance of
        a class. Doesn't matter to us. We do this synchronized as it's
        possible multiple greenthreads started very quickly trying to do
        DB calls and eventlet can switch threads before self.__backend gets
        assigned.
        """
        if self.__backend:
            # Another thread assigned it
            return self.__backend
        backend_name = CONF.database.backend
        self.__use_tpool = CONF.database.use_tpool
        if self.__use_tpool:
            from eventlet import tpool
            self.__tpool = tpool
        # Import the untranslated name if we don't have a
        # mapping.
        backend_path = self.__backend_mapping.get(backend_name,
                                                  backend_name)
        backend_mod = importutils.import_module(backend_path)
        self.__backend = backend_mod.get_backend()
        return self.__backend

    def __getattr__(self, key):
        backend = self.__backend or self.__get_backend()
        attr = getattr(backend, key)
        if not self.__use_tpool or not hasattr(attr, '__call__'):
            return attr

        def tpool_wrapper(*args, **kwargs):
            return self.__tpool.execute(attr, *args, **kwargs)

        functools.update_wrapper(tpool_wrapper, attr)
        return tpool_wrapper
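For context, the removed DBAPI class is a lazy-loading proxy: attribute access
imports the configured backend module on first use and, when use_tpool is
enabled, wraps callables in eventlet's tpool. A minimal usage sketch — the
backend module path and function names below are hypothetical, not taken from
this commit:

    from taskflow.openstack.common.db import api as db_api

    # Map a short backend name to an importable module; the module must
    # expose a no-argument get_backend() returning the implementation.
    _BACKEND_MAPPING = {'sqlalchemy': 'myproject.db.sqlalchemy.api'}
    IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING)

    def job_get(job_id):
        # Forwarded to the backend's job_get(); executed in a tpool
        # thread when CONF.database.use_tpool is enabled.
        return IMPL.job_get(job_id)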
taskflow/openstack/common/db/exception.py (file deleted)
@@ -1,51 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""DB related custom exceptions."""

from taskflow.openstack.common.gettextutils import _  # noqa


class DBError(Exception):
    """Wraps an implementation specific exception."""
    def __init__(self, inner_exception=None):
        self.inner_exception = inner_exception
        super(DBError, self).__init__(str(inner_exception))


class DBDuplicateEntry(DBError):
    """Wraps an implementation specific exception."""
    def __init__(self, columns=[], inner_exception=None):
        self.columns = columns
        super(DBDuplicateEntry, self).__init__(inner_exception)


class DBDeadlock(DBError):
    def __init__(self, inner_exception=None):
        super(DBDeadlock, self).__init__(inner_exception)


class DBInvalidUnicodeParameter(Exception):
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")


class DbMigrationError(DBError):
    """Wraps migration specific exception."""
    def __init__(self, message=None):
        super(DbMigrationError, self).__init__(str(message))
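These wrappers give callers backend-agnostic error handling; the wrapped
Session class in the sibling session.py module (also deleted below) translates
raw SQLAlchemy errors into them. A short consumption sketch — the session,
row, and handler names are hypothetical:

    from taskflow.openstack.common.db import exception as db_exc

    try:
        session.add(duplicate_row)   # hypothetical wrapped Session and row
        session.flush()
    except db_exc.DBDuplicateEntry as e:
        # e.columns carries the column names parsed out of the backend's
        # unique-constraint violation message; e.inner_exception keeps the
        # original IntegrityError for logging.
        handle_conflict(e.columns)   # hypothetical conflict handler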
taskflow/openstack/common/db/sqlalchemy/__init__.py (file deleted)
@@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Cloudscaling Group, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
taskflow/openstack/common/db/sqlalchemy/migration.py (file deleted)
@@ -1,278 +0,0 @@
# coding: utf-8
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Based on code in migrate/changeset/databases/sqlite.py which is under
# the following license:
#
# The MIT License
#
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE

import distutils.version as dist_version
import os
import re

import migrate
from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
from migrate.versioning import util as migrate_util
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint

from taskflow.openstack.common.db import exception
from taskflow.openstack.common.db.sqlalchemy import session as db_session
from taskflow.openstack.common.gettextutils import _  # noqa


@migrate_util.decorator
def patched_with_engine(f, *a, **kw):
    url = a[0]
    engine = migrate_util.construct_engine(url, **kw)

    try:
        kw['engine'] = engine
        return f(*a, **kw)
    finally:
        if isinstance(engine, migrate_util.Engine) and engine is not url:
            migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
            engine.dispose()


# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
#                on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
        dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
    migrate_util.with_engine = patched_with_engine


# NOTE(jkoelker) Delay importing migrate until we are patched
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository

_REPOSITORY = None

get_engine = db_session.get_engine


def _get_unique_constraints(self, table):
    """Retrieve information about existing unique constraints of the table

    This feature is needed for _recreate_table() to work properly.
    Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.

    """

    data = table.metadata.bind.execute(
        """SELECT sql
           FROM sqlite_master
           WHERE
               type='table' AND
               name=:table_name""",
        table_name=table.name
    ).fetchone()[0]

    UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
    return [
        UniqueConstraint(
            *[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
            name=name
        )
        for name, cols in re.findall(UNIQUE_PATTERN, data)
    ]


def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
    """Recreate the table properly

    Unlike the corresponding original method of sqlalchemy-migrate this one
    doesn't drop existing unique constraints when creating a new one.

    """

    table_name = self.preparer.format_table(table)

    # we remove all indexes so as not to have
    # problems during copy and re-create
    for index in table.indexes:
        index.drop()

    # reflect existing unique constraints
    for uc in self._get_unique_constraints(table):
        table.append_constraint(uc)
    # omit given unique constraints when creating a new table if required
    table.constraints = set([
        cons for cons in table.constraints
        if omit_uniques is None or cons.name not in omit_uniques
    ])

    self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
    self.execute()

    insertion_string = self._modify_table(table, column, delta)

    table.create(bind=self.connection)
    self.append(insertion_string % {'table_name': table_name})
    self.execute()
    self.append('DROP TABLE migration_tmp')
    self.execute()


def _visit_migrate_unique_constraint(self, *p, **k):
    """Drop the given unique constraint

    The corresponding original method of sqlalchemy-migrate just
    raises NotImplemented error

    """

    self.recreate_table(p[0].table, omit_uniques=[p[0].name])


def patch_migrate():
    """A workaround for SQLite's inability to alter things

    SQLite abilities to alter tables are very limited (please read
    http://www.sqlite.org/lang_altertable.html for more details).
    E. g. one can't drop a column or a constraint in SQLite. The
    workaround for this is to recreate the original table omitting
    the corresponding constraint (or column).

    sqlalchemy-migrate library has recreate_table() method that
    implements this workaround, but it does it wrong:

        - information about unique constraints of a table
          is not retrieved. So if you have a table with one
          unique constraint and a migration adding another one
          you will end up with a table that has only the
          latter unique constraint, and the former will be lost

        - dropping of unique constraints is not supported at all

    The proper way to fix this is to provide a pull-request to
    sqlalchemy-migrate, but the project seems to be dead. So we
    can go on with monkey-patching of the lib at least for now.

    """

    # this patch is needed to ensure that recreate_table() doesn't drop
    # existing unique constraints of the table when creating a new one
    helper_cls = sqlite.SQLiteHelper
    helper_cls.recreate_table = _recreate_table
    helper_cls._get_unique_constraints = _get_unique_constraints

    # this patch is needed to be able to drop existing unique constraints
    constraint_cls = sqlite.SQLiteConstraintDropper
    constraint_cls.visit_migrate_unique_constraint = \
        _visit_migrate_unique_constraint
    constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
                                sqlite.SQLiteConstraintGenerator)


def db_sync(abs_path, version=None, init_version=0):
    """Upgrade or downgrade a database.

    Function runs the upgrade() or downgrade() functions in change scripts.

    :param abs_path:     Absolute path to migrate repository.
    :param version:      Database will upgrade/downgrade until this version.
                         If None - database will update to the latest
                         available version.
    :param init_version: Initial database version
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.DbMigrationError(
                message=_("version should be an integer"))

    current_version = db_version(abs_path, init_version)
    repository = _find_migrate_repo(abs_path)
    if version is None or version > current_version:
        return versioning_api.upgrade(get_engine(), repository, version)
    else:
        return versioning_api.downgrade(get_engine(), repository,
                                        version)


def db_version(abs_path, init_version):
    """Show the current version of the repository.

    :param abs_path: Absolute path to migrate repository
    :param version:  Initial database version
    """
    repository = _find_migrate_repo(abs_path)
    try:
        return versioning_api.db_version(get_engine(), repository)
    except versioning_exceptions.DatabaseNotControlledError:
        meta = sqlalchemy.MetaData()
        engine = get_engine()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0:
            db_version_control(abs_path, init_version)
            return versioning_api.db_version(get_engine(), repository)
        else:
            # Some pre-Essex DB's may not be version controlled.
            # Require them to upgrade using Essex first.
            raise exception.DbMigrationError(
                message=_("Upgrade DB using Essex release first."))


def db_version_control(abs_path, version=None):
    """Mark a database as under this repository's version control.

    Once a database is under version control, schema changes should
    only be done via change scripts in this repository.

    :param abs_path: Absolute path to migrate repository
    :param version:  Initial database version
    """
    repository = _find_migrate_repo(abs_path)
    versioning_api.version_control(get_engine(), repository, version)
    return version


def _find_migrate_repo(abs_path):
    """Get the project's change script repository

    :param abs_path: Absolute path to migrate repository
    """
    global _REPOSITORY
    if not os.path.exists(abs_path):
        raise exception.DbMigrationError("Path %s not found" % abs_path)
    if _REPOSITORY is None:
        _REPOSITORY = Repository(abs_path)
    return _REPOSITORY
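This module tied a sqlalchemy-migrate repository to the shared engine from
session.py. Roughly how a project drove it — a sketch only; the repository
path is hypothetical, and calling patch_migrate() first is the conventional
order for SQLite deployments, not something this commit states:

    from taskflow.openstack.common.db.sqlalchemy import migration

    REPO = '/usr/lib/python/myproject/db/migrate_repo'  # hypothetical path

    migration.patch_migrate()       # apply the SQLite monkey-patches first
    migration.db_sync(REPO)         # run change scripts up to the latest
    current = migration.db_version(REPO, init_version=0)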
taskflow/openstack/common/db/sqlalchemy/models.py (file deleted)
@@ -1,108 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""

import six

from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper

from taskflow.openstack.common.db.sqlalchemy import session as sa
from taskflow.openstack.common import timeutils


class ModelBase(object):
    """Base class for models."""
    __table_initialized__ = False

    def save(self, session=None):
        """Save this object."""
        if not session:
            session = sa.get_session()
        # NOTE(boris-42): This part of code should be look like:
        #                       sesssion.add(self)
        #                       session.flush()
        #                 But there is a bug in sqlalchemy and eventlet that
        #                 raises NoneType exception if there is no running
        #                 transaction and rollback is called. As long as
        #                 sqlalchemy has this bug we have to create transaction
        #                 explicity.
        with session.begin(subtransactions=True):
            session.add(self)
            session.flush()

    def __setitem__(self, key, value):
        setattr(self, key, value)

    def __getitem__(self, key):
        return getattr(self, key)

    def get(self, key, default=None):
        return getattr(self, key, default)

    def __iter__(self):
        columns = dict(object_mapper(self).columns).keys()
        # NOTE(russellb): Allow models to specify other keys that can be looked
        # up, beyond the actual db columns. An example would be the 'name'
        # property for an Instance.
        if hasattr(self, '_extra_keys'):
            columns.extend(self._extra_keys())
        self._i = iter(columns)
        return self

    def next(self):
        n = six.advance_iterator(self._i)
        return n, getattr(self, n)

    def update(self, values):
        """Make the model object behave like a dict."""
        for k, v in six.iteritems(values):
            setattr(self, k, v)

    def iteritems(self):
        """Make the model object behave like a dict.

        Includes attributes from joins.
        """
        local = dict(self)
        joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
                      if not k[0] == '_'])
        local.update(joined)
        return local.iteritems()


class TimestampMixin(object):
    created_at = Column(DateTime, default=timeutils.utcnow)
    updated_at = Column(DateTime, onupdate=timeutils.utcnow)


class SoftDeleteMixin(object):
    deleted_at = Column(DateTime)
    deleted = Column(Integer, default=0)

    def soft_delete(self, session=None):
        """Mark this object as deleted."""
        self.deleted = self.id
        self.deleted_at = timeutils.utcnow()
        self.save(session=session)
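A minimal sketch of layering a declarative model on these mixins, following
the NovaBase pattern shown in the session.py docstring below — the base-class,
table, and column names here are hypothetical:

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    from taskflow.openstack.common.db.sqlalchemy import models

    Base = declarative_base()

    class TaskFlowBase(models.SoftDeleteMixin,
                       models.TimestampMixin,
                       models.ModelBase):
        pass

    class Bar(Base, TaskFlowBase):
        __tablename__ = 'bars'          # hypothetical table
        id = Column(Integer, primary_key=True)
        name = Column(String(255))

    # bar = Bar(name='x'); bar.save()   # dict-like helpers from ModelBase
    # bar.soft_delete()                 # sets deleted/deleted_at, then saves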
@ -1,792 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""Session Handling for SQLAlchemy backend.
|
|
||||||
|
|
||||||
Initializing:
|
|
||||||
|
|
||||||
* Call set_defaults with the minimal of the following kwargs:
|
|
||||||
sql_connection, sqlite_db
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
session.set_defaults(
|
|
||||||
sql_connection="sqlite:///var/lib/taskflow/sqlite.db",
|
|
||||||
sqlite_db="/var/lib/taskflow/sqlite.db")
|
|
||||||
|
|
||||||
Recommended ways to use sessions within this framework:
|
|
||||||
|
|
||||||
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
|
|
||||||
model_query() will implicitly use a session when called without one
|
|
||||||
supplied. This is the ideal situation because it will allow queries
|
|
||||||
to be automatically retried if the database connection is interrupted.
|
|
||||||
|
|
||||||
Note: Automatic retry will be enabled in a future patch.
|
|
||||||
|
|
||||||
It is generally fine to issue several queries in a row like this. Even though
|
|
||||||
they may be run in separate transactions and/or separate sessions, each one
|
|
||||||
will see the data from the prior calls. If needed, undo- or rollback-like
|
|
||||||
functionality should be handled at a logical level. For an example, look at
|
|
||||||
the code around quotas and reservation_rollback().
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
|
|
||||||
def get_foo(context, foo):
|
|
||||||
return model_query(context, models.Foo).\
|
|
||||||
filter_by(foo=foo).\
|
|
||||||
first()
|
|
||||||
|
|
||||||
def update_foo(context, id, newfoo):
|
|
||||||
model_query(context, models.Foo).\
|
|
||||||
filter_by(id=id).\
|
|
||||||
update({'foo': newfoo})
|
|
||||||
|
|
||||||
def create_foo(context, values):
|
|
||||||
foo_ref = models.Foo()
|
|
||||||
foo_ref.update(values)
|
|
||||||
foo_ref.save()
|
|
||||||
return foo_ref
|
|
||||||
|
|
||||||
|
|
||||||
* Within the scope of a single method, keeping all the reads and writes within
|
|
||||||
the context managed by a single session. In this way, the session's __exit__
|
|
||||||
handler will take care of calling flush() and commit() for you.
|
|
||||||
If using this approach, you should not explicitly call flush() or commit().
|
|
||||||
Any error within the context of the session will cause the session to emit
|
|
||||||
a ROLLBACK. If the connection is dropped before this is possible, the
|
|
||||||
database will implicitly rollback the transaction.
|
|
||||||
|
|
||||||
Note: statements in the session scope will not be automatically retried.
|
|
||||||
|
|
||||||
If you create models within the session, they need to be added, but you
|
|
||||||
do not need to call model.save()
|
|
||||||
|
|
||||||
def create_many_foo(context, foos):
|
|
||||||
session = get_session()
|
|
||||||
with session.begin():
|
|
||||||
for foo in foos:
|
|
||||||
foo_ref = models.Foo()
|
|
||||||
foo_ref.update(foo)
|
|
||||||
session.add(foo_ref)
|
|
||||||
|
|
||||||
def update_bar(context, foo_id, newbar):
|
|
||||||
session = get_session()
|
|
||||||
with session.begin():
|
|
||||||
foo_ref = model_query(context, models.Foo, session).\
|
|
||||||
filter_by(id=foo_id).\
|
|
||||||
first()
|
|
||||||
model_query(context, models.Bar, session).\
|
|
||||||
filter_by(id=foo_ref['bar_id']).\
|
|
||||||
update({'bar': newbar})
|
|
||||||
|
|
||||||
Note: update_bar is a trivially simple example of using "with session.begin".
|
|
||||||
Whereas create_many_foo is a good example of when a transaction is needed,
|
|
||||||
it is always best to use as few queries as possible. The two queries in
|
|
||||||
update_bar can be better expressed using a single query which avoids
|
|
||||||
the need for an explicit transaction. It can be expressed like so:
|
|
||||||
|
|
||||||
def update_bar(context, foo_id, newbar):
|
|
||||||
subq = model_query(context, models.Foo.id).\
|
|
||||||
filter_by(id=foo_id).\
|
|
||||||
limit(1).\
|
|
||||||
subquery()
|
|
||||||
model_query(context, models.Bar).\
|
|
||||||
filter_by(id=subq.as_scalar()).\
|
|
||||||
update({'bar': newbar})
|
|
||||||
|
|
||||||
For reference, this emits approximagely the following SQL statement:
|
|
||||||
|
|
||||||
UPDATE bar SET bar = ${newbar}
|
|
||||||
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
|
|
||||||
|
|
||||||
* Passing an active session between methods. Sessions should only be passed
|
|
||||||
to private methods. The private method must use a subtransaction; otherwise
|
|
||||||
SQLAlchemy will throw an error when you call session.begin() on an existing
|
|
||||||
transaction. Public methods should not accept a session parameter and should
|
|
||||||
not be involved in sessions within the caller's scope.
|
|
||||||
|
|
||||||
Note that this incurs more overhead in SQLAlchemy than the above means
|
|
||||||
due to nesting transactions, and it is not possible to implicitly retry
|
|
||||||
failed database operations when using this approach.
|
|
||||||
|
|
||||||
This also makes code somewhat more difficult to read and debug, because a
|
|
||||||
single database transaction spans more than one method. Error handling
|
|
||||||
becomes less clear in this situation. When this is needed for code clarity,
|
|
||||||
it should be clearly documented.
|
|
||||||
|
|
||||||
def myfunc(foo):
|
|
||||||
session = get_session()
|
|
||||||
with session.begin():
|
|
||||||
# do some database things
|
|
||||||
bar = _private_func(foo, session)
|
|
||||||
return bar
|
|
||||||
|
|
||||||
def _private_func(foo, session=None):
|
|
||||||
if not session:
|
|
||||||
session = get_session()
|
|
||||||
with session.begin(subtransaction=True):
|
|
||||||
# do some other database things
|
|
||||||
return bar
|
|
||||||
|
|
||||||
|
|
||||||
There are some things which it is best to avoid:
|
|
||||||
|
|
||||||
* Don't keep a transaction open any longer than necessary.
|
|
||||||
|
|
||||||
This means that your "with session.begin()" block should be as short
|
|
||||||
as possible, while still containing all the related calls for that
|
|
||||||
transaction.
|
|
||||||
|
|
||||||
* Avoid "with_lockmode('UPDATE')" when possible.
|
|
||||||
|
|
||||||
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
|
|
||||||
any rows, it will take a gap-lock. This is a form of write-lock on the
|
|
||||||
"gap" where no rows exist, and prevents any other writes to that space.
|
|
||||||
This can effectively prevent any INSERT into a table by locking the gap
|
|
||||||
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
|
|
||||||
has an overly broad WHERE clause, or doesn't properly use an index.
|
|
||||||
|
|
||||||
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
|
|
||||||
number of rows matching a query, and if only one row is returned,
|
|
||||||
then issue the SELECT FOR UPDATE.
|
|
||||||
|
|
||||||
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
|
|
||||||
However, this can not be done until the "deleted" columns are removed and
|
|
||||||
proper UNIQUE constraints are added to the tables.
|
|
||||||
|
|
||||||
|
|
||||||
Enabling soft deletes:
|
|
||||||
|
|
||||||
* To use/enable soft-deletes, the SoftDeleteMixin must be added
|
|
||||||
to your model class. For example:
|
|
||||||
|
|
||||||
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
Efficient use of soft deletes:
|
|
||||||
|
|
||||||
* There are two possible ways to mark a record as deleted:
|
|
||||||
model.soft_delete() and query.soft_delete().
|
|
||||||
|
|
||||||
model.soft_delete() method works with single already fetched entry.
|
|
||||||
query.soft_delete() makes only one db request for all entries that correspond
|
|
||||||
to query.
|
|
||||||
|
|
||||||
* In almost all cases you should use query.soft_delete(). Some examples:
|
|
||||||
|
|
||||||
def soft_delete_bar():
|
|
||||||
count = model_query(BarModel).find(some_condition).soft_delete()
|
|
||||||
if count == 0:
|
|
||||||
raise Exception("0 entries were soft deleted")
|
|
||||||
|
|
||||||
def complex_soft_delete_with_synchronization_bar(session=None):
|
|
||||||
if session is None:
|
|
||||||
session = get_session()
|
|
||||||
with session.begin(subtransactions=True):
|
|
||||||
count = model_query(BarModel).\
|
|
||||||
find(some_condition).\
|
|
||||||
soft_delete(synchronize_session=True)
|
|
||||||
# Here synchronize_session is required, because we
|
|
||||||
# don't know what is going on in outer session.
|
|
||||||
if count == 0:
|
|
||||||
raise Exception("0 entries were soft deleted")
|
|
||||||
|
|
||||||
* There is only one situation where model.soft_delete() is appropriate: when
|
|
||||||
you fetch a single record, work with it, and mark it as deleted in the same
|
|
||||||
transaction.
|
|
||||||
|
|
||||||
def soft_delete_bar_model():
|
|
||||||
session = get_session()
|
|
||||||
with session.begin():
|
|
||||||
bar_ref = model_query(BarModel).find(some_condition).first()
|
|
||||||
# Work with bar_ref
|
|
||||||
bar_ref.soft_delete(session=session)
|
|
||||||
|
|
||||||
However, if you need to work with all entries that correspond to query and
|
|
||||||
then soft delete them you should use query.soft_delete() method:
|
|
||||||
|
|
||||||
def soft_delete_multi_models():
|
|
||||||
session = get_session()
|
|
||||||
with session.begin():
|
|
||||||
query = model_query(BarModel, session=session).\
|
|
||||||
find(some_condition)
|
|
||||||
model_refs = query.all()
|
|
||||||
# Work with model_refs
|
|
||||||
query.soft_delete(synchronize_session=False)
|
|
||||||
# synchronize_session=False should be set if there is no outer
|
|
||||||
# session and these entries are not used after this.
|
|
||||||
|
|
||||||
When working with many rows, it is very important to use query.soft_delete,
|
|
||||||
which issues a single query. Using model.soft_delete(), as in the following
|
|
||||||
example, is very inefficient.
|
|
||||||
|
|
||||||
for bar_ref in bar_refs:
|
|
||||||
bar_ref.soft_delete(session=session)
|
|
||||||
# This will produce count(bar_refs) db requests.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import os.path
|
|
||||||
import re
|
|
||||||
import time
|
|
||||||
|
|
||||||
from eventlet import greenthread
|
|
||||||
from oslo.config import cfg
|
|
||||||
import six
|
|
||||||
from sqlalchemy import exc as sqla_exc
|
|
||||||
import sqlalchemy.interfaces
|
|
||||||
from sqlalchemy.interfaces import PoolListener
|
|
||||||
import sqlalchemy.orm
|
|
||||||
from sqlalchemy.pool import NullPool, StaticPool
|
|
||||||
from sqlalchemy.sql.expression import literal_column
|
|
||||||
|
|
||||||
from taskflow.openstack.common.db import exception
|
|
||||||
from taskflow.openstack.common.gettextutils import _ # noqa
|
|
||||||
from taskflow.openstack.common import log as logging
|
|
||||||
from taskflow.openstack.common import timeutils
|
|
||||||
|
|
||||||
sqlite_db_opts = [
|
|
||||||
cfg.StrOpt('sqlite_db',
|
|
||||||
default='taskflow.sqlite',
|
|
||||||
help='the filename to use with sqlite'),
|
|
||||||
cfg.BoolOpt('sqlite_synchronous',
|
|
||||||
default=True,
|
|
||||||
help='If true, use synchronous mode for sqlite'),
|
|
||||||
]
|
|
||||||
|
|
||||||
database_opts = [
|
|
||||||
cfg.StrOpt('connection',
|
|
||||||
default='sqlite:///' +
|
|
||||||
os.path.abspath(os.path.join(os.path.dirname(__file__),
|
|
||||||
'../', '$sqlite_db')),
|
|
||||||
help='The SQLAlchemy connection string used to connect to the '
|
|
||||||
'database',
|
|
||||||
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
|
|
||||||
group='DEFAULT'),
|
|
||||||
cfg.DeprecatedOpt('sql_connection',
|
|
||||||
group='DATABASE')]),
|
|
||||||
cfg.StrOpt('slave_connection',
|
|
||||||
default='',
|
|
||||||
help='The SQLAlchemy connection string used to connect to the '
|
|
||||||
'slave database'),
|
|
||||||
cfg.IntOpt('idle_timeout',
|
|
||||||
default=3600,
|
|
||||||
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
|
|
||||||
group='DEFAULT'),
|
|
||||||
cfg.DeprecatedOpt('sql_idle_timeout',
|
|
||||||
group='DATABASE')],
|
|
||||||
help='timeout before idle sql connections are reaped'),
|
|
||||||
cfg.IntOpt('min_pool_size',
|
|
||||||
default=1,
|
|
||||||
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
|
|
||||||
group='DEFAULT'),
|
|
||||||
cfg.DeprecatedOpt('sql_min_pool_size',
|
|
||||||
group='DATABASE')],
|
|
||||||
help='Minimum number of SQL connections to keep open in a '
|
|
||||||
'pool'),
|
|
||||||
cfg.IntOpt('max_pool_size',
|
|
||||||
default=None,
|
|
||||||
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
|
|
||||||
group='DEFAULT'),
|
|
||||||
cfg.DeprecatedOpt('sql_max_pool_size',
|
|
||||||
group='DATABASE')],
|
|
||||||
help='Maximum number of SQL connections to keep open in a '
|
|
||||||
'pool'),
|
|
||||||
cfg.IntOpt('max_retries',
|
|
||||||
default=10,
|
|
||||||
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
|
|
||||||
group='DEFAULT'),
|
|
||||||
cfg.DeprecatedOpt('sql_max_retries',
|
|
||||||
group='DATABASE')],
|
|
||||||
help='maximum db connection retries during startup. '
|
|
||||||
'(setting -1 implies an infinite retry count)'),
|
|
||||||
cfg.IntOpt('retry_interval',
|
|
||||||
default=10,
|
|
||||||
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
|
|
||||||
group='DEFAULT'),
|
|
||||||
cfg.DeprecatedOpt('reconnect_interval',
|
|
||||||
group='DATABASE')],
|
|
||||||
help='interval between retries of opening a sql connection'),
|
|
||||||
cfg.IntOpt('max_overflow',
|
|
||||||
default=None,
|
|
||||||
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
|
|
||||||
group='DEFAULT'),
|
|
||||||
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
|
|
||||||
group='DATABASE')],
|
|
||||||
help='If set, use this value for max_overflow with sqlalchemy'),
|
|
||||||
cfg.IntOpt('connection_debug',
|
|
||||||
default=0,
|
|
||||||
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
|
|
||||||
group='DEFAULT')],
|
|
||||||
help='Verbosity of SQL debugging information. 0=None, '
|
|
||||||
'100=Everything'),
|
|
||||||
cfg.BoolOpt('connection_trace',
|
|
||||||
default=False,
|
|
||||||
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
|
|
||||||
group='DEFAULT')],
|
|
||||||
help='Add python stack traces to SQL as comment strings'),
|
|
||||||
cfg.IntOpt('pool_timeout',
|
|
||||||
default=None,
|
|
||||||
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
|
|
||||||
group='DATABASE')],
|
|
||||||
help='If set, use this value for pool_timeout with sqlalchemy'),
|
|
||||||
]
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
CONF.register_opts(sqlite_db_opts)
|
|
||||||
CONF.register_opts(database_opts, 'database')
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
_ENGINE = None
|
|
||||||
_MAKER = None
|
|
||||||
_SLAVE_ENGINE = None
|
|
||||||
_SLAVE_MAKER = None
|
|
||||||
|
|
||||||
|
|
||||||
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
|
|
||||||
max_overflow=None, pool_timeout=None):
|
|
||||||
"""Set defaults for configuration variables."""
|
|
||||||
cfg.set_defaults(database_opts,
|
|
||||||
connection=sql_connection)
|
|
||||||
cfg.set_defaults(sqlite_db_opts,
|
|
||||||
sqlite_db=sqlite_db)
|
|
||||||
# Update the QueuePool defaults
|
|
||||||
if max_pool_size is not None:
|
|
||||||
cfg.set_defaults(database_opts,
|
|
||||||
max_pool_size=max_pool_size)
|
|
||||||
if max_overflow is not None:
|
|
||||||
cfg.set_defaults(database_opts,
|
|
||||||
max_overflow=max_overflow)
|
|
||||||
if pool_timeout is not None:
|
|
||||||
cfg.set_defaults(database_opts,
|
|
||||||
pool_timeout=pool_timeout)
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup():
|
|
||||||
global _ENGINE, _MAKER
|
|
||||||
global _SLAVE_ENGINE, _SLAVE_MAKER
|
|
||||||
|
|
||||||
if _MAKER:
|
|
||||||
_MAKER.close_all()
|
|
||||||
_MAKER = None
|
|
||||||
if _ENGINE:
|
|
||||||
_ENGINE.dispose()
|
|
||||||
_ENGINE = None
|
|
||||||
if _SLAVE_MAKER:
|
|
||||||
_SLAVE_MAKER.close_all()
|
|
||||||
_SLAVE_MAKER = None
|
|
||||||
if _SLAVE_ENGINE:
|
|
||||||
_SLAVE_ENGINE.dispose()
|
|
||||||
_SLAVE_ENGINE = None
|
|
||||||
|
|
||||||
|
|
||||||
class SqliteForeignKeysListener(PoolListener):
|
|
||||||
"""Ensures that the foreign key constraints are enforced in SQLite.
|
|
||||||
|
|
||||||
The foreign key constraints are disabled by default in SQLite,
|
|
||||||
so the foreign key constraints will be enabled here for every
|
|
||||||
database connection
|
|
||||||
"""
|
|
||||||
def connect(self, dbapi_con, con_record):
|
|
||||||
dbapi_con.execute('pragma foreign_keys=ON')
|
|
||||||
|
|
||||||
|
|
||||||
def get_session(autocommit=True, expire_on_commit=False,
|
|
||||||
sqlite_fk=False, slave_session=False):
|
|
||||||
"""Return a SQLAlchemy session."""
|
|
||||||
global _MAKER
|
|
||||||
global _SLAVE_MAKER
|
|
||||||
maker = _MAKER
|
|
||||||
|
|
||||||
if slave_session:
|
|
||||||
maker = _SLAVE_MAKER
|
|
||||||
|
|
||||||
if maker is None:
|
|
||||||
engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session)
|
|
||||||
maker = get_maker(engine, autocommit, expire_on_commit)
|
|
||||||
|
|
||||||
if slave_session:
|
|
||||||
_SLAVE_MAKER = maker
|
|
||||||
else:
|
|
||||||
_MAKER = maker
|
|
||||||
|
|
||||||
session = maker()
|
|
||||||
return session
|
|
||||||
|
|
||||||
|
|
||||||
# note(boris-42): In current versions of DB backends unique constraint
|
|
||||||
# violation messages follow the structure:
|
|
||||||
#
|
|
||||||
# sqlite:
|
|
||||||
# 1 column - (IntegrityError) column c1 is not unique
|
|
||||||
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
|
|
||||||
#
|
|
||||||
# postgres:
|
|
||||||
# 1 column - (IntegrityError) duplicate key value violates unique
|
|
||||||
# constraint "users_c1_key"
|
|
||||||
# N columns - (IntegrityError) duplicate key value violates unique
|
|
||||||
# constraint "name_of_our_constraint"
|
|
||||||
#
|
|
||||||
# mysql:
|
|
||||||
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
|
|
||||||
# 'c1'")
|
|
||||||
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
|
|
||||||
# with -' for key 'name_of_our_constraint'")
|
|
||||||
_DUP_KEY_RE_DB = {
|
|
||||||
"sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
|
|
||||||
"postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
|
|
||||||
"mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
|
|
||||||
"""Raise exception if two entries are duplicated.
|
|
||||||
|
|
||||||
In this function will be raised DBDuplicateEntry exception if integrity
|
|
||||||
error wrap unique constraint violation.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def get_columns_from_uniq_cons_or_name(columns):
|
|
||||||
# note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
|
|
||||||
# where `t` it is table name and columns `c1`, `c2`
|
|
||||||
# are in UniqueConstraint.
|
|
||||||
uniqbase = "uniq_"
|
|
||||||
if not columns.startswith(uniqbase):
|
|
||||||
if engine_name == "postgresql":
|
|
||||||
return [columns[columns.index("_") + 1:columns.rindex("_")]]
|
|
||||||
return [columns]
|
|
||||||
return columns[len(uniqbase):].split("0")[1:]
|
|
||||||
|
|
||||||
if engine_name not in ["mysql", "sqlite", "postgresql"]:
|
|
||||||
return
|
|
||||||
|
|
||||||
# FIXME(johannes): The usage of the .message attribute has been
|
|
||||||
# deprecated since Python 2.6. However, the exceptions raised by
|
|
||||||
# SQLAlchemy can differ when using unicode() and accessing .message.
|
|
||||||
# An audit across all three supported engines will be necessary to
|
|
||||||
# ensure there are no regressions.
|
|
||||||
m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message)
|
|
||||||
if not m:
|
|
||||||
return
|
|
||||||
columns = m.group(1)
|
|
||||||
|
|
||||||
if engine_name == "sqlite":
|
|
||||||
columns = columns.strip().split(", ")
|
|
||||||
else:
|
|
||||||
columns = get_columns_from_uniq_cons_or_name(columns)
|
|
||||||
raise exception.DBDuplicateEntry(columns, integrity_error)
|
|
||||||
|
|
||||||
|
|
||||||
# NOTE(comstud): In current versions of DB backends, Deadlock violation
|
|
||||||
# messages follow the structure:
|
|
||||||
#
|
|
||||||
# mysql:
|
|
||||||
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
|
|
||||||
# 'restarting transaction') <query_str> <query_args>
|
|
||||||
_DEADLOCK_RE_DB = {
|
|
||||||
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def _raise_if_deadlock_error(operational_error, engine_name):
|
|
||||||
"""Raise exception on deadlock condition.
|
|
||||||
|
|
||||||
Raise DBDeadlock exception if OperationalError contains a Deadlock
|
|
||||||
condition.
|
|
||||||
"""
|
|
||||||
re = _DEADLOCK_RE_DB.get(engine_name)
|
|
||||||
if re is None:
|
|
||||||
return
|
|
||||||
# FIXME(johannes): The usage of the .message attribute has been
|
|
||||||
# deprecated since Python 2.6. However, the exceptions raised by
|
|
||||||
# SQLAlchemy can differ when using unicode() and accessing .message.
|
|
||||||
# An audit across all three supported engines will be necessary to
|
|
||||||
# ensure there are no regressions.
|
|
||||||
m = re.match(operational_error.message)
|
|
||||||
if not m:
|
|
||||||
return
|
|
||||||
raise exception.DBDeadlock(operational_error)
|
|
||||||
|
|
||||||
|
|
||||||
def _wrap_db_error(f):
|
|
||||||
def _wrap(*args, **kwargs):
|
|
||||||
try:
|
|
||||||
return f(*args, **kwargs)
|
|
||||||
except UnicodeEncodeError:
|
|
||||||
raise exception.DBInvalidUnicodeParameter()
|
|
||||||
# note(boris-42): We should catch unique constraint violation and
|
|
||||||
# wrap it by our own DBDuplicateEntry exception. Unique constraint
|
|
||||||
# violation is wrapped by IntegrityError.
|
|
||||||
except sqla_exc.OperationalError as e:
|
|
||||||
_raise_if_deadlock_error(e, get_engine().name)
|
|
||||||
# NOTE(comstud): A lot of code is checking for OperationalError
|
|
||||||
# so let's not wrap it for now.
|
|
||||||
raise
|
|
||||||
except sqla_exc.IntegrityError as e:
|
|
||||||
# note(boris-42): SqlAlchemy doesn't unify errors from different
|
|
||||||
# DBs so we must do this. Also in some tables (for example
|
|
||||||
# instance_types) there are more than one unique constraint. This
|
|
||||||
# means we should get names of columns, which values violate
|
|
||||||
# unique constraint, from error message.
|
|
||||||
_raise_if_duplicate_entry_error(e, get_engine().name)
|
|
||||||
raise exception.DBError(e)
|
|
||||||
except Exception as e:
|
|
||||||
LOG.exception(_('DB exception wrapped.'))
|
|
||||||
raise exception.DBError(e)
|
|
||||||
_wrap.func_name = f.func_name
|
|
||||||
return _wrap
|
|
||||||
|
|
||||||
|
|
||||||
def get_engine(sqlite_fk=False, slave_engine=False):
|
|
||||||
"""Return a SQLAlchemy engine."""
|
|
||||||
global _ENGINE
|
|
||||||
global _SLAVE_ENGINE
|
|
||||||
engine = _ENGINE
|
|
||||||
db_uri = CONF.database.connection
|
|
||||||
|
|
||||||
if slave_engine:
|
|
||||||
engine = _SLAVE_ENGINE
|
|
||||||
db_uri = CONF.database.slave_connection
|
|
||||||
|
|
||||||
if engine is None:
|
|
||||||
engine = create_engine(db_uri,
|
|
||||||
sqlite_fk=sqlite_fk)
|
|
||||||
if slave_engine:
|
|
||||||
_SLAVE_ENGINE = engine
|
|
||||||
else:
|
|
||||||
_ENGINE = engine
|
|
||||||
|
|
||||||
return engine
|
|
||||||
|
|
||||||
|
|
||||||
def _synchronous_switch_listener(dbapi_conn, connection_rec):
|
|
||||||
"""Switch sqlite connections to non-synchronous mode."""
|
|
||||||
dbapi_conn.execute("PRAGMA synchronous = OFF")
|
|
||||||
|
|
||||||
|
|
||||||
def _add_regexp_listener(dbapi_con, con_record):
|
|
||||||
"""Add REGEXP function to sqlite connections."""
|
|
||||||
|
|
||||||
def regexp(expr, item):
|
|
||||||
reg = re.compile(expr)
|
|
||||||
return reg.search(six.text_type(item)) is not None
|
|
||||||
dbapi_con.create_function('regexp', 2, regexp)
|
|
||||||
|
|
||||||
|
|
||||||
def _greenthread_yield(dbapi_con, con_record):
|
|
||||||
"""Ensure other greenthreads get a chance to be executed.
|
|
||||||
|
|
||||||
Force a context switch. With common database backends (eg MySQLdb and
|
|
||||||
sqlite), there is no implicit yield caused by network I/O since they are
|
|
||||||
implemented by C libraries that eventlet cannot monkey patch.
|
|
||||||
"""
|
|
||||||
greenthread.sleep(0)
|
|
||||||
|
|
||||||
|
|
||||||
def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
|
|
||||||
"""Ensures that MySQL connections checked out of the pool are alive.
|
|
||||||
|
|
||||||
Borrowed from:
|
|
||||||
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
dbapi_conn.cursor().execute('select 1')
|
|
||||||
except dbapi_conn.OperationalError as ex:
|
|
||||||
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
|
|
||||||
LOG.warn(_('Got mysql server has gone away: %s'), ex)
|
|
||||||
raise sqla_exc.DisconnectionError("Database server went away")
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
def _is_db_connection_error(args):
|
|
||||||
"""Return True if error in connecting to db."""
|
|
||||||
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
|
|
||||||
# to support Postgres and others.
|
|
||||||
conn_err_codes = ('2002', '2003', '2006')
|
|
||||||
for err_code in conn_err_codes:
|
|
||||||
if args.find(err_code) != -1:
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def create_engine(sql_connection, sqlite_fk=False):
|
|
||||||
"""Return a new SQLAlchemy engine."""
|
|
||||||
# NOTE(geekinutah): At this point we could be connecting to the normal
|
|
||||||
# db handle or the slave db handle. Things like
|
|
||||||
# _wrap_db_error aren't going to work well if their
|
|
||||||
# backends don't match. Let's check.
|
|
||||||
_assert_matching_drivers()
|
|
||||||
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
|
|
||||||
|
|
||||||
engine_args = {
|
|
||||||
"pool_recycle": CONF.database.idle_timeout,
|
|
||||||
"echo": False,
|
|
||||||
'convert_unicode': True,
|
|
||||||
}
|
|
||||||
|
|
||||||
# Map our SQL debug level to SQLAlchemy's options
|
|
||||||
if CONF.database.connection_debug >= 100:
|
|
||||||
engine_args['echo'] = 'debug'
|
|
||||||
elif CONF.database.connection_debug >= 50:
|
|
||||||
engine_args['echo'] = True
|
|
||||||
|
|
||||||
if "sqlite" in connection_dict.drivername:
|
|
||||||
if sqlite_fk:
|
|
||||||
engine_args["listeners"] = [SqliteForeignKeysListener()]
|
|
||||||
engine_args["poolclass"] = NullPool
|
|
||||||
|
|
||||||
if CONF.database.connection == "sqlite://":
|
|
||||||
engine_args["poolclass"] = StaticPool
|
|
||||||
engine_args["connect_args"] = {'check_same_thread': False}
|
|
||||||
else:
|
|
||||||
if CONF.database.max_pool_size is not None:
|
|
||||||
engine_args['pool_size'] = CONF.database.max_pool_size
|
|
||||||
if CONF.database.max_overflow is not None:
|
|
||||||
engine_args['max_overflow'] = CONF.database.max_overflow
|
|
||||||
if CONF.database.pool_timeout is not None:
|
|
||||||
engine_args['pool_timeout'] = CONF.database.pool_timeout
|
|
||||||
|
|
||||||
engine = sqlalchemy.create_engine(sql_connection, **engine_args)
|
|
||||||
|
|
||||||
sqlalchemy.event.listen(engine, 'checkin', _greenthread_yield)
|
|
||||||
|
|
||||||
if 'mysql' in connection_dict.drivername:
|
|
||||||
sqlalchemy.event.listen(engine, 'checkout', _ping_listener)
|
|
||||||
elif 'sqlite' in connection_dict.drivername:
|
|
||||||
if not CONF.sqlite_synchronous:
|
|
||||||
sqlalchemy.event.listen(engine, 'connect',
|
|
||||||
_synchronous_switch_listener)
|
|
||||||
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
|
|
||||||
|
|
||||||
if (CONF.database.connection_trace and
|
|
||||||
engine.dialect.dbapi.__name__ == 'MySQLdb'):
|
|
||||||
_patch_mysqldb_with_stacktrace_comments()
|
|
||||||
|
|
||||||
try:
|
|
||||||
engine.connect()
|
|
||||||
except sqla_exc.OperationalError as e:
|
|
||||||
if not _is_db_connection_error(e.args[0]):
|
|
||||||
raise
|
|
||||||
|
|
||||||
remaining = CONF.database.max_retries
|
|
||||||
if remaining == -1:
|
|
||||||
remaining = 'infinite'
|
|
||||||
while True:
|
|
||||||
msg = _('SQL connection failed. %s attempts left.')
|
|
||||||
LOG.warn(msg % remaining)
|
|
||||||
if remaining != 'infinite':
|
|
||||||
remaining -= 1
|
|
||||||
time.sleep(CONF.database.retry_interval)
|
|
||||||
try:
|
|
||||||
engine.connect()
|
|
||||||
break
|
|
||||||
except sqla_exc.OperationalError as e:
|
|
||||||
if (remaining != 'infinite' and remaining == 0) or \
|
|
||||||
not _is_db_connection_error(e.args[0]):
|
|
||||||
raise
|
|
||||||
return engine
|
|
||||||
|
|
||||||
|
|
||||||
class Query(sqlalchemy.orm.query.Query):
|
|
||||||
"""Subclass of sqlalchemy.query with soft_delete() method."""
|
|
||||||
def soft_delete(self, synchronize_session='evaluate'):
|
|
||||||
return self.update({'deleted': literal_column('id'),
|
|
||||||
'updated_at': literal_column('updated_at'),
|
|
||||||
'deleted_at': timeutils.utcnow()},
|
|
||||||
synchronize_session=synchronize_session)
|
|
||||||
|
|
||||||
|
|
||||||
class Session(sqlalchemy.orm.session.Session):
|
|
||||||
"""Custom Session class to avoid SqlAlchemy Session monkey patching."""
|
|
||||||
@_wrap_db_error
|
|
||||||
def query(self, *args, **kwargs):
|
|
||||||
return super(Session, self).query(*args, **kwargs)
|
|
||||||
|
|
||||||
@_wrap_db_error
|
|
||||||
def flush(self, *args, **kwargs):
|
|
||||||
return super(Session, self).flush(*args, **kwargs)
|
|
||||||
|
|
||||||
@_wrap_db_error
|
|
||||||
def execute(self, *args, **kwargs):
|
|
||||||
return super(Session, self).execute(*args, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def get_maker(engine, autocommit=True, expire_on_commit=False):
|
|
||||||
"""Return a SQLAlchemy sessionmaker using the given engine."""
|
|
||||||
return sqlalchemy.orm.sessionmaker(bind=engine,
|
|
||||||
class_=Session,
|
|
||||||
autocommit=autocommit,
|
|
||||||
expire_on_commit=expire_on_commit,
|
|
||||||
query_cls=Query)
|
|
||||||
|
|
||||||
|
|
||||||
def _patch_mysqldb_with_stacktrace_comments():
|
|
||||||
"""Adds current stack trace as a comment in queries.
|
|
||||||
|
|
||||||
Patches MySQLdb.cursors.BaseCursor._do_query.
|
|
||||||
"""
|
|
||||||
import MySQLdb.cursors
|
|
||||||
import traceback
|
|
||||||
|
|
||||||
old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query
|
|
||||||
|
|
||||||
def _do_query(self, q):
|
|
||||||
stack = ''
|
|
||||||
for file, line, method, function in traceback.extract_stack():
|
|
||||||
# exclude various common things from trace
|
|
||||||
if file.endswith('session.py') and method == '_do_query':
|
|
||||||
continue
|
|
||||||
if file.endswith('api.py') and method == 'wrapper':
|
|
||||||
continue
|
|
||||||
if file.endswith('utils.py') and method == '_inner':
|
|
||||||
continue
|
|
||||||
if file.endswith('exception.py') and method == '_wrap':
|
|
||||||
continue
|
|
||||||
# db/api is just a wrapper around db/sqlalchemy/api
|
|
||||||
if file.endswith('db/api.py'):
|
|
||||||
continue
|
|
||||||
# only trace inside taskflow
|
|
||||||
index = file.rfind('taskflow')
|
|
||||||
if index == -1:
|
|
||||||
continue
|
|
||||||
stack += "File:%s:%s Method:%s() Line:%s | " \
|
|
||||||
% (file[index:], line, method, function)
|
|
||||||
|
|
||||||
# strip trailing " | " from stack
|
|
||||||
if stack:
|
|
||||||
stack = stack[:-3]
|
|
||||||
qq = "%s /* %s */" % (q, stack)
|
|
||||||
else:
|
|
||||||
qq = q
|
|
||||||
old_mysql_do_query(self, qq)
|
|
||||||
|
|
||||||
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
|
|
||||||
|
|
||||||
|
|
||||||
def _assert_matching_drivers():
|
|
||||||
"""Make sure slave handle and normal handle have the same driver."""
|
|
||||||
# NOTE(geekinutah): There's no use case for writing to one backend and
|
|
||||||
# reading from another. Who knows what the future holds?
|
|
||||||
if CONF.database.slave_connection == '':
|
|
||||||
return
|
|
||||||
|
|
||||||
normal = sqlalchemy.engine.url.make_url(CONF.database.connection)
|
|
||||||
slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection)
|
|
||||||
assert normal.drivername == slave.drivername
|
|
|
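The retry loop removed above boils down to: connect once, and on a connection-type OperationalError keep retrying on a fixed interval until the attempts run out. A standalone sketch of the same pattern, with hard-coded values standing in for the removed CONF.database options:

    import time

    import sqlalchemy
    from sqlalchemy import exc as sqla_exc

    def create_engine_with_retry(sql_connection, max_retries=10,
                                 retry_interval=10):
        # Mirrors the deleted logic, minus oslo.config: try to connect,
        # sleep and retry while attempts remain, then give up.
        engine = sqlalchemy.create_engine(sql_connection)
        remaining = max_retries
        while True:
            try:
                engine.connect()
                return engine
            except sqla_exc.OperationalError:
                if remaining == 0:
                    raise
                remaining -= 1
                time.sleep(retry_interval)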
@@ -1,499 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

from migrate.changeset import UniqueConstraint
import sqlalchemy
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType

from taskflow.openstack.common.gettextutils import _  # noqa

from taskflow.openstack.common import exception
from taskflow.openstack.common import log as logging
from taskflow.openstack.common import timeutils


LOG = logging.getLogger(__name__)

_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")


def sanitize_db_url(url):
    match = _DBURL_REGEX.match(url)
    if match:
        return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
    return url


class InvalidSortKey(Exception):
    message = _("Sort key supplied was not valid.")


# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we returns the next
                    results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))

    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(_("Unknown sort direction, "
                               "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(0, len(sort_keys)):
            crit_attrs = []
            for j in range(0, i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            else:
                crit_attrs.append((model_attr > marker_values[i]))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query


def get_table(engine, name):
    """Returns an sqlalchemy table dynamically from db.

    Needed because the models don't work for us in migrations
    as models will be far out of sync with the current data.
    """
    metadata = MetaData()
    metadata.bind = engine
    return Table(name, metadata, autoload=True)


class InsertFromSelect(UpdateBase):
    """Form the base for `INSERT INTO table (SELECT ... )` statement."""
    def __init__(self, table, select):
        self.table = table
        self.select = select


@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
    """Form the `INSERT INTO table (SELECT ... )` statement."""
    return "INSERT INTO %s %s" % (
        compiler.process(element.table, asfrom=True),
        compiler.process(element.select))


def _get_not_supported_column(col_name_col_instance, column_name):
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because column has unsupported "
                "type by sqlite).")
        raise exception.OpenstackException(message=msg % column_name)

    if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s It should be instance "
                "of sqlalchemy.Column.")
        raise exception.OpenstackException(message=msg % column_name)
    return column


def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
                           **col_name_col_instance):
    """Drop unique constraint from table.

    This method drops UC from table and works for mysql, postgresql and sqlite.
    In mysql and postgresql we are able to use "alter table" construction.
    Sqlalchemy doesn't support some sqlite column types and replaces their
    type with NullType in metadata. We process these columns and replace
    NullType with the correct column type.

    :param migrate_engine: sqlalchemy engine
    :param table_name:     name of table that contains uniq constraint.
    :param uc_name:        name of uniq constraint that will be dropped.
    :param columns:        columns that are in uniq constraint.
    :param col_name_col_instance:   contains pair column_name=column_instance.
                            column_instance is instance of Column. These params
                            are required only for columns that have unsupported
                            types by sqlite. For example BigInteger.
    """

    meta = MetaData()
    meta.bind = migrate_engine
    t = Table(table_name, meta, autoload=True)

    if migrate_engine.name == "sqlite":
        override_cols = [
            _get_not_supported_column(col_name_col_instance, col.name)
            for col in t.columns
            if isinstance(col.type, NullType)
        ]
        for col in override_cols:
            t.columns.replace(col)

    uc = UniqueConstraint(*columns, table=t, name=uc_name)
    uc.drop()


def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drop (or mark ad `deleted` if use_soft_delete is True) old
    duplicate rows form table with name `table_name`.

    :param migrate_engine:  Sqlalchemy engine
    :param table_name:      Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
                       "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)


def _get_default_deleted_value(table):
    if isinstance(table.c.id.type, Integer):
        return 0
    if isinstance(table.c.id.type, String):
        return ""
    raise exception.OpenstackException(
        message=_("Unsupported id columns type"))


def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
    table = get_table(migrate_engine, table_name)

    insp = reflection.Inspector.from_engine(migrate_engine)
    real_indexes = insp.get_indexes(table_name)
    existing_index_names = dict(
        [(index['name'], index['column_names']) for index in real_indexes])

    # NOTE(boris-42): Restore indexes on `deleted` column
    for index in indexes:
        if 'deleted' not in index['column_names']:
            continue
        name = index['name']
        if name in existing_index_names:
            column_names = [table.c[c] for c in existing_index_names[name]]
            old_index = Index(name, *column_names, unique=index["unique"])
            old_index.drop(migrate_engine)

        column_names = [table.c[c] for c in index['column_names']]
        new_index = Index(index["name"], *column_names, unique=index["unique"])
        new_index.create(migrate_engine)


def change_deleted_column_type_to_boolean(migrate_engine, table_name,
                                          **col_name_col_instance):
    if migrate_engine.name == "sqlite":
        return _change_deleted_column_type_to_boolean_sqlite(
            migrate_engine, table_name, **col_name_col_instance)
    insp = reflection.Inspector.from_engine(migrate_engine)
    indexes = insp.get_indexes(table_name)

    table = get_table(migrate_engine, table_name)

    old_deleted = Column('old_deleted', Boolean, default=False)
    old_deleted.create(table, populate_default=False)

    table.update().\
        where(table.c.deleted == table.c.id).\
        values(old_deleted=True).\
        execute()

    table.c.deleted.drop()
    table.c.old_deleted.alter(name="deleted")

    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)


def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
                                                  **col_name_col_instance):
    insp = reflection.Inspector.from_engine(migrate_engine)
    table = get_table(migrate_engine, table_name)

    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            column_copy = Column('deleted', Boolean, default=0)
        columns.append(column_copy)

    constraints = [constraint.copy() for constraint in table.constraints]

    meta = MetaData(bind=migrate_engine)
    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))

    c_select = []
    for c in table.c:
        if c.name != "deleted":
            c_select.append(c)
        else:
            c_select.append(table.c.deleted == table.c.id)

    ins = InsertFromSelect(new_table, select(c_select))
    migrate_engine.execute(ins)

    table.drop()
    [index.create(migrate_engine) for index in indexes]

    new_table.rename(table_name)
    new_table.update().\
        where(new_table.c.deleted == new_table.c.id).\
        values(deleted=True).\
        execute()


def change_deleted_column_type_to_id_type(migrate_engine, table_name,
                                          **col_name_col_instance):
    if migrate_engine.name == "sqlite":
        return _change_deleted_column_type_to_id_type_sqlite(
            migrate_engine, table_name, **col_name_col_instance)
    insp = reflection.Inspector.from_engine(migrate_engine)
    indexes = insp.get_indexes(table_name)

    table = get_table(migrate_engine, table_name)

    new_deleted = Column('new_deleted', table.c.id.type,
                         default=_get_default_deleted_value(table))
    new_deleted.create(table, populate_default=True)

    deleted = True  # workaround for pyflakes
    table.update().\
        where(table.c.deleted == deleted).\
        values(new_deleted=table.c.id).\
        execute()
    table.c.deleted.drop()
    table.c.new_deleted.alter(name="deleted")

    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)


def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
                                                  **col_name_col_instance):
    # NOTE(boris-42): sqlaclhemy-migrate can't drop column with check
    #                 constraints in sqlite DB and our `deleted` column has
    #                 2 check constraints. So there is only one way to remove
    #                 these constraints:
    #                 1) Create new table with the same columns, constraints
    #                 and indexes. (except deleted column).
    #                 2) Copy all data from old to new table.
    #                 3) Drop old table.
    #                 4) Rename new table to old table name.
    insp = reflection.Inspector.from_engine(migrate_engine)
    meta = MetaData(bind=migrate_engine)
    table = Table(table_name, meta, autoload=True)
    default_deleted_value = _get_default_deleted_value(table)

    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            column_copy = Column('deleted', table.c.id.type,
                                 default=default_deleted_value)
        columns.append(column_copy)

    def is_deleted_column_constraint(constraint):
        # NOTE(boris-42): There is no other way to check is CheckConstraint
        #                 associated with deleted column.
        if not isinstance(constraint, CheckConstraint):
            return False
        sqltext = str(constraint.sqltext)
        return (sqltext.endswith("deleted in (0, 1)") or
                sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))

    constraints = []
    for constraint in table.constraints:
        if not is_deleted_column_constraint(constraint):
            constraints.append(constraint.copy())

    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))

    ins = InsertFromSelect(new_table, table.select())
    migrate_engine.execute(ins)

    table.drop()
    [index.create(migrate_engine) for index in indexes]

    new_table.rename(table_name)
    deleted = True  # workaround for pyflakes
    new_table.update().\
        where(new_table.c.deleted == deleted).\
        values(deleted=new_table.c.id).\
        execute()

    # NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
    deleted = False  # workaround for pyflakes
    new_table.update().\
        where(new_table.c.deleted == deleted).\
        values(deleted=default_deleted_value).\
        execute()
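paginate_query is easiest to read from the caller's side: sort on a unique compound key, then feed the last row of one page back in as the marker for the next. A hypothetical usage sketch (Task, its columns, and session are invented names):

    query = session.query(Task)
    first_page = paginate_query(query, Task, limit=20,
                                sort_keys=['created_at', 'id'],
                                sort_dirs=['desc', 'desc']).all()

    # The last row of the previous page becomes the marker; rows strictly
    # "after" it in the compound ordering form the next page.
    marker = first_page[-1]
    next_page = paginate_query(session.query(Task), Task, limit=20,
                               sort_keys=['created_at', 'id'],
                               sort_dirs=['desc', 'desc'],
                               marker=marker).all()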
@@ -1,139 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Exceptions common to OpenStack projects
"""

import logging

from taskflow.openstack.common.gettextutils import _  # noqa

_FATAL_EXCEPTION_FORMAT_ERRORS = False


class Error(Exception):
    def __init__(self, message=None):
        super(Error, self).__init__(message)


class ApiError(Error):
    def __init__(self, message='Unknown', code='Unknown'):
        self.api_message = message
        self.code = code
        super(ApiError, self).__init__('%s: %s' % (code, message))


class NotFound(Error):
    pass


class UnknownScheme(Error):

    msg_fmt = "Unknown scheme '%s' found in URI"

    def __init__(self, scheme):
        msg = self.msg_fmt % scheme
        super(UnknownScheme, self).__init__(msg)


class BadStoreUri(Error):

    msg_fmt = "The Store URI %s was malformed. Reason: %s"

    def __init__(self, uri, reason):
        msg = self.msg_fmt % (uri, reason)
        super(BadStoreUri, self).__init__(msg)


class Duplicate(Error):
    pass


class NotAuthorized(Error):
    pass


class NotEmpty(Error):
    pass


class Invalid(Error):
    pass


class BadInputError(Exception):
    """Error resulting from a client sending bad input to a server"""
    pass


class MissingArgumentError(Error):
    pass


class DatabaseMigrationError(Error):
    pass


class ClientConnectionError(Exception):
    """Error resulting from a client connecting to a server"""
    pass


def wrap_exception(f):
    def _wrap(*args, **kw):
        try:
            return f(*args, **kw)
        except Exception as e:
            if not isinstance(e, Error):
                logging.exception(_('Uncaught exception'))
                raise Error(str(e))
            raise
    _wrap.func_name = f.func_name
    return _wrap


class OpenstackException(Exception):
    """Base Exception class.

    To correctly use this class, inherit from it and define
    a 'msg_fmt' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    msg_fmt = "An unknown exception occurred"

    def __init__(self, **kwargs):
        try:
            self._error_string = self.msg_fmt % kwargs

        except Exception:
            if _FATAL_EXCEPTION_FORMAT_ERRORS:
                raise
            else:
                # at least get the core message out if something happened
                self._error_string = self.msg_fmt

    def __str__(self):
        return self._error_string


class MalformedRequestBody(OpenstackException):
    msg_fmt = "Malformed message body: %(reason)s"


class InvalidContentType(OpenstackException):
    msg_fmt = "Invalid content type %(content_type)s"
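OpenstackException formats msg_fmt with the constructor's keyword arguments, falling back to the raw format string if interpolation fails. Subclassing looks like this (VolumeNotFound is an invented example class):

    class VolumeNotFound(OpenstackException):
        msg_fmt = "Volume %(volume_id)s could not be found"

    err = VolumeNotFound(volume_id='vol-42')
    assert str(err) == "Volume vol-42 could not be found"

    # A bad kwarg does not blow up unless _FATAL_EXCEPTION_FORMAT_ERRORS
    # is set; the unformatted msg_fmt is used instead.
    assert str(VolumeNotFound(wrong_key=1)) == VolumeNotFound.msg_fmt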
@@ -24,6 +24,8 @@ import sys
 import time
 import traceback
 
+import six
+
 from taskflow.openstack.common.gettextutils import _  # noqa
 
 
@@ -65,7 +67,7 @@ class save_and_reraise_exception(object):
                                                          self.tb))
             return False
         if self.reraise:
-            raise self.type_, self.value, self.tb
+            six.reraise(self.type_, self.value, self.tb)
 
 
 def forever_retry_uncaught_exceptions(infunc):
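The hunk above replaces the Python 2-only three-argument raise statement with six.reraise, which re-raises the original exception with its traceback on both Python 2 and 3. The pattern in isolation:

    import sys

    import six

    def run_with_cleanup(func, cleanup):
        try:
            func()
        except Exception:
            exc_type, exc_value, exc_tb = sys.exc_info()
            cleanup()
            # Re-raise the original exception, preserving its traceback.
            six.reraise(exc_type, exc_value, exc_tb)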
@@ -1,110 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import contextlib
import errno
import os

from taskflow.openstack.common import excutils
from taskflow.openstack.common.gettextutils import _  # noqa
from taskflow.openstack.common import log as logging

LOG = logging.getLogger(__name__)

_FILE_CACHE = {}


def ensure_tree(path):
    """Create a directory (and any ancestor directories required)

    :param path: Directory to create
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            if not os.path.isdir(path):
                raise
        else:
            raise


def read_cached_file(filename, force_reload=False):
    """Read from a file if it has been modified.

    :param force_reload: Whether to reload the file.
    :returns: A tuple with a boolean specifying if the data is fresh
              or not.
    """
    global _FILE_CACHE

    if force_reload and filename in _FILE_CACHE:
        del _FILE_CACHE[filename]

    reloaded = False
    mtime = os.path.getmtime(filename)
    cache_info = _FILE_CACHE.setdefault(filename, {})

    if not cache_info or mtime > cache_info.get('mtime', 0):
        LOG.debug(_("Reloading cached file %s") % filename)
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
        reloaded = True
    return (reloaded, cache_info['data'])


def delete_if_exists(path):
    """Delete a file, but ignore file not found error.

    :param path: File to delete
    """

    try:
        os.unlink(path)
    except OSError as e:
        if e.errno == errno.ENOENT:
            return
        else:
            raise


@contextlib.contextmanager
def remove_path_on_error(path):
    """Protect code that wants to operate on PATH atomically.
    Any exception will cause PATH to be removed.

    :param path: File to work with
    """
    try:
        yield
    except Exception:
        with excutils.save_and_reraise_exception():
            delete_if_exists(path)


def file_open(*args, **kwargs):
    """Open file

    see built-in file() documentation for more details

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)
    """
    return file(*args, **kwargs)
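remove_path_on_error pairs a filesystem operation with automatic cleanup: if the body raises, the path is deleted and the exception propagates. A usage sketch (the path is arbitrary):

    import os

    path = '/tmp/staging.dat'
    try:
        with remove_path_on_error(path):
            with open(path, 'w') as f:
                f.write('partial data')
            raise IOError('simulated failure during a multi-step write')
    except IOError:
        # The exception still propagates, but the half-written file
        # has already been removed by delete_if_exists().
        assert not os.path.exists(path)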
@@ -26,10 +26,13 @@ Usual usage in an openstack.common module:
 import copy
 import gettext
-import logging.handlers
+import logging
 import os
 import re
-import UserString
+try:
+    import UserString as _userString
+except ImportError:
+    import collections as _userString
 
 from babel import localedata
 import six
 
@@ -37,10 +40,26 @@ import six
 _localedir = os.environ.get('taskflow'.upper() + '_LOCALEDIR')
 _t = gettext.translation('taskflow', localedir=_localedir, fallback=True)
 
-_AVAILABLE_LANGUAGES = []
+_AVAILABLE_LANGUAGES = {}
+USE_LAZY = False
+
+
+def enable_lazy():
+    """Convenience function for configuring _() to use lazy gettext
+
+    Call this at the start of execution to enable the gettextutils._
+    function to use lazy gettext functionality. This is useful if
+    your project is importing _ directly instead of using the
+    gettextutils.install() way of importing the _ function.
+    """
+    global USE_LAZY
+    USE_LAZY = True
 
 
 def _(msg):
-    return _t.ugettext(msg)
+    if USE_LAZY:
+        return Message(msg, 'taskflow')
+    else:
+        return _t.ugettext(msg)
 
 
@@ -95,7 +114,7 @@ def install(domain, lazy=False):
                      unicode=True)
 
 
-class Message(UserString.UserString, object):
+class Message(_userString.UserString, object):
     """Class used to encapsulate translatable messages."""
     def __init__(self, msg, domain):
         # _msg is the gettext msgid and should never change
@@ -236,7 +255,7 @@ class Message(UserString.UserString, object):
         if name in ops:
             return getattr(self.data, name)
         else:
-            return UserString.UserString.__getattribute__(self, name)
+            return _userString.UserString.__getattribute__(self, name)
 
 
 def get_available_languages(domain):
@@ -244,8 +263,8 @@ def get_available_languages(domain):
 
     :param domain: the domain to get languages for
    """
-    if _AVAILABLE_LANGUAGES:
-        return _AVAILABLE_LANGUAGES
+    if domain in _AVAILABLE_LANGUAGES:
+        return copy.copy(_AVAILABLE_LANGUAGES[domain])
 
     localedir = '%s_LOCALEDIR' % domain.upper()
     find = lambda x: gettext.find(domain,
@@ -254,7 +273,7 @@ def get_available_languages(domain):
 
     # NOTE(mrodden): en_US should always be available (and first in case
     # order matters) since our in-line message strings are en_US
-    _AVAILABLE_LANGUAGES.append('en_US')
+    language_list = ['en_US']
     # NOTE(luisg): Babel <1.0 used a function called list(), which was
     # renamed to locale_identifiers() in >=1.0, the requirements master list
     # requires >=0.9.6, uncapped, so defensively work with both. We can remove
@@ -264,13 +283,14 @@ def get_available_languages(domain):
     locale_identifiers = list_identifiers()
     for i in locale_identifiers:
         if find(i) is not None:
-            _AVAILABLE_LANGUAGES.append(i)
-    return _AVAILABLE_LANGUAGES
+            language_list.append(i)
+    _AVAILABLE_LANGUAGES[domain] = language_list
+    return copy.copy(language_list)
 
 
 def get_localized_message(message, user_locale):
     """Gets a localized version of the given message in the given locale."""
-    if (isinstance(message, Message)):
+    if isinstance(message, Message):
         if user_locale:
             message.locale = user_locale
         return unicode(message)
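After this change, _() stays eager by default and only returns lazily-translated Message objects once enable_lazy() has been called, and get_available_languages caches results per domain. Roughly, the intended call order is:

    from taskflow.openstack.common import gettextutils

    gettextutils.enable_lazy()       # opt in before any _() calls
    msg = gettextutils._('resize failed')
    # msg is now a Message; actual translation is deferred until it is
    # coerced to unicode, optionally after assigning msg.locale first.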
@@ -38,14 +38,18 @@ import functools
 import inspect
 import itertools
 import json
-import types
-import xmlrpclib
+try:
+    import xmlrpclib
+except ImportError:
+    # NOTE(jd): xmlrpclib is not shipped with Python 3
+    xmlrpclib = None
 
-import netaddr
 import six
 
+from taskflow.openstack.common import importutils
 from taskflow.openstack.common import timeutils
 
+netaddr = importutils.try_import("netaddr")
+
 _nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                      inspect.isfunction, inspect.isgeneratorfunction,
@@ -53,7 +57,8 @@ _nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                      inspect.iscode, inspect.isbuiltin, inspect.isroutine,
                      inspect.isabstract]
 
-_simple_types = (types.NoneType, int, basestring, bool, float, long)
+_simple_types = (six.string_types + six.integer_types
+                 + (type(None), bool, float))
 
 
 def to_primitive(value, convert_instances=False, convert_datetime=True,
@@ -125,7 +130,7 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
     # It's not clear why xmlrpclib created their own DateTime type, but
     # for our purposes, make it a datetime type which is explicitly
     # handled
-    if isinstance(value, xmlrpclib.DateTime):
+    if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
         value = datetime.datetime(*tuple(value.timetuple())[:6])
 
     if convert_datetime and isinstance(value, datetime.datetime):
@@ -138,7 +143,7 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
             # Likely an instance of something. Watch for cycles.
             # Ignore class member vars.
             return recursive(value.__dict__, level=level + 1)
-        elif isinstance(value, netaddr.IPAddress):
+        elif netaddr and isinstance(value, netaddr.IPAddress):
             return six.text_type(value)
         else:
             if any(test(value) for test in _nasty_type_tests):
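Both jsonutils hunks apply the same guard: import an optional dependency defensively and check the module object before touching its attributes. importutils.try_import does the work; a simplified stand-in:

    import importlib

    def try_import(import_str, default=None):
        # Simplified version of importutils.try_import: return the module
        # when it can be imported, otherwise return the default.
        try:
            return importlib.import_module(import_str)
        except ImportError:
            return default

    netaddr = try_import('netaddr')
    value = '10.0.0.1'
    if netaddr:
        value = netaddr.IPAddress(value)  # only used when available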
@@ -1,47 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Local storage of variables using weak references"""

import threading
import weakref


class WeakLocal(threading.local):
    def __getattribute__(self, attr):
        rval = super(WeakLocal, self).__getattribute__(attr)
        if rval:
            # NOTE(mikal): this bit is confusing. What is stored is a weak
            # reference, not the value itself. We therefore need to lookup
            # the weak reference and return the inner value here.
            rval = rval()
        return rval

    def __setattr__(self, attr, value):
        value = weakref.ref(value)
        return super(WeakLocal, self).__setattr__(attr, value)


# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()
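Because WeakLocal stores only weak references, a value disappears from the store as soon as the last strong reference to it goes away; strong_store is the plain threading.local counterpart. A quick demonstration:

    import gc

    class Holder(object):
        pass

    obj = Holder()
    weak_store.attr = obj
    assert weak_store.attr is obj

    del obj
    gc.collect()
    # The weak reference is now dead, so the store hands back None.
    assert weak_store.attr is None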
@@ -1,276 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import contextlib
import errno
import functools
import os
import time
import weakref

from eventlet import semaphore
from oslo.config import cfg

from taskflow.openstack.common import fileutils
from taskflow.openstack.common.gettextutils import _  # noqa
from taskflow.openstack.common import local
from taskflow.openstack.common import log as logging


LOG = logging.getLogger(__name__)


util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Whether to disable inter-process locks'),
    cfg.StrOpt('lock_path',
               help=('Directory to use for lock files.'))
]


CONF = cfg.CONF
CONF.register_opts(util_opts)


def set_defaults(lock_path):
    cfg.set_defaults(util_opts, lock_path=lock_path)


class _InterProcessLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        self.lockfile = None
        self.fname = name

    def __enter__(self):
        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                return self
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)

    def trylock(self):
        raise NotImplementedError()

    def unlock(self):
        raise NotImplementedError()


class _WindowsLock(_InterProcessLock):
    def trylock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)


class _PosixLock(_InterProcessLock):
    def trylock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)


if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _PosixLock

_semaphores = weakref.WeakValueDictionary()


@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `semaphore.Semaphore` instance unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
    lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.

    :param lock_path: The lock_path keyword argument is used to specify a
    special location for external lock files to live. If nothing is set, then
    CONF.lock_path is used as a default.
    """
    # NOTE(soren): If we ever go natively threaded, this will be racy.
    #              See http://stackoverflow.com/questions/5390569/dyn
    #              amically-allocating-and-destroying-mutexes
    sem = _semaphores.get(name, semaphore.Semaphore())
    if name not in _semaphores:
        # this check is not racy - we're already holding ref locally
        # so GC won't remove the item and there was no IO switch
        # (only valid in greenthreads)
        _semaphores[name] = sem

    with sem:
        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})

        # NOTE(mikal): I know this looks odd
        if not hasattr(local.strong_store, 'locks_held'):
            local.strong_store.locks_held = []
        local.strong_store.locks_held.append(name)

        try:
            if external and not CONF.disable_process_locking:
                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
                          {'lock': name})

                # We need a copy of lock_path because it is non-local
                local_lock_path = lock_path or CONF.lock_path
                if not local_lock_path:
                    raise cfg.RequiredOptError('lock_path')

                if not os.path.exists(local_lock_path):
                    fileutils.ensure_tree(local_lock_path)
                    LOG.info(_('Created lock path: %s'), local_lock_path)

                def add_prefix(name, prefix):
                    if not prefix:
                        return name
                    sep = '' if prefix.endswith('-') else '-'
                    return '%s%s%s' % (prefix, sep, name)

                # NOTE(mikal): the lock name cannot contain directory
                # separators
                lock_file_name = add_prefix(name.replace(os.sep, '_'),
                                            lock_file_prefix)

                lock_file_path = os.path.join(local_lock_path, lock_file_name)

                try:
                    lock = InterProcessLock(lock_file_path)
                    with lock as lock:
                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
                                  {'lock': name, 'path': lock_file_path})
                        yield lock
                finally:
                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
                              {'lock': name, 'path': lock_file_path})
            else:
                yield sem

        finally:
            local.strong_store.locks_held.remove(name)


def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.
    """

    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            with lock(name, lock_file_prefix, external, lock_path):
                LOG.debug(_('Got semaphore / lock "%(function)s"'),
                          {'function': f.__name__})
                return f(*args, **kwargs)

            LOG.debug(_('Semaphore / lock released "%(function)s"'),
                      {'function': f.__name__})
        return inner
    return wrap


def synchronized_with_prefix(lock_file_prefix):
    """Partial object generator for the synchronization decorator.

    Redefine @synchronized in each project like so::

        (in nova/utils.py)
        from nova.openstack.common import lockutils

        synchronized = lockutils.synchronized_with_prefix('nova-')


        (in nova/foo.py)
        from nova import utils

        @utils.synchronized('mylock')
        def bar(self, *args):
           ...

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix.
    """

    return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
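With external=True the synchronized decorator layers a file lock on top of the per-name semaphore, so two separate worker processes serialize as well. A sketch (the lock name, prefix, and path are invented):

    @synchronized('state-update', lock_file_prefix='taskflow-',
                  external=True, lock_path='/var/lock/taskflow')
    def update_state():
        # Only one thread in one process may run this body at a time;
        # the on-disk lock file lives under lock_path.
        pass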
@@ -1,559 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Openstack logging handler.

This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.

It also allows setting of formatting information through conf.

"""

import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import sys
import traceback

from oslo.config import cfg
from six import moves

from taskflow.openstack.common.gettextutils import _  # noqa
from taskflow.openstack.common import importutils
from taskflow.openstack.common import jsonutils
from taskflow.openstack.common import local


_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]

logging_cli_opts = [
    cfg.StrOpt('log-config',
               metavar='PATH',
               help='If this option is specified, the logging configuration '
                    'file specified is used and overrides any other logging '
                    'options specified. Please see the Python logging module '
                    'documentation for details on logging configuration '
                    'files.'),
    cfg.StrOpt('log-format',
               default=None,
               metavar='FORMAT',
               help='DEPRECATED. '
                    'A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated. Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='syslog facility to receive log lines')
]

generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error')
]

log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user)s %(tenant)s] '
                       '%(instance)s%(message)s',
               help='format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='prefix each line of exception output with this format'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqplib=WARN',
                    'sqlalchemy=WARN',
                    'boto=WARN',
                    'suds=INFO',
                    'keystone=INFO',
                    'eventlet.wsgi.server=WARN'
                ],
                help='list of logger=LEVEL pairs'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='publish error events'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='make deprecations fatal'),

    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='If an instance is passed with the log message, format '
                    'it like this'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='If an instance UUID is passed with the log message, '
                    'format it like this'),
]

CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)

# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
|
|
||||||
# module aware of it so it acts like other levels.
|
|
||||||
logging.AUDIT = logging.INFO + 1
|
|
||||||
logging.addLevelName(logging.AUDIT, 'AUDIT')
|
|
||||||
|
|
||||||
|
|
||||||
try:
|
|
||||||
NullHandler = logging.NullHandler
|
|
||||||
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
|
|
||||||
class NullHandler(logging.Handler):
|
|
||||||
def handle(self, record):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def emit(self, record):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def createLock(self):
|
|
||||||
self.lock = None
|
|
||||||
|
|
||||||
|
|
||||||
def _dictify_context(context):
|
|
||||||
if context is None:
|
|
||||||
return None
|
|
||||||
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
|
|
||||||
context = context.to_dict()
|
|
||||||
return context
|
|
||||||
|
|
||||||
|
|
||||||
def _get_binary_name():
|
|
||||||
return os.path.basename(inspect.stack()[-1][1])
|
|
||||||
|
|
||||||
|
|
||||||
def _get_log_file_path(binary=None):
|
|
||||||
logfile = CONF.log_file
|
|
||||||
logdir = CONF.log_dir
|
|
||||||
|
|
||||||
if logfile and not logdir:
|
|
||||||
return logfile
|
|
||||||
|
|
||||||
if logfile and logdir:
|
|
||||||
return os.path.join(logdir, logfile)
|
|
||||||
|
|
||||||
if logdir:
|
|
||||||
binary = binary or _get_binary_name()
|
|
||||||
return '%s.log' % (os.path.join(logdir, binary),)
|
|
||||||
|
|
||||||
|
|
||||||
class BaseLoggerAdapter(logging.LoggerAdapter):
|
|
||||||
|
|
||||||
def audit(self, msg, *args, **kwargs):
|
|
||||||
self.log(logging.AUDIT, msg, *args, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
class LazyAdapter(BaseLoggerAdapter):
|
|
||||||
def __init__(self, name='unknown', version='unknown'):
|
|
||||||
self._logger = None
|
|
||||||
self.extra = {}
|
|
||||||
self.name = name
|
|
||||||
self.version = version
|
|
||||||
|
|
||||||
@property
|
|
||||||
def logger(self):
|
|
||||||
if not self._logger:
|
|
||||||
self._logger = getLogger(self.name, self.version)
|
|
||||||
return self._logger
|
|
||||||
|
|
||||||
|
|
||||||
class ContextAdapter(BaseLoggerAdapter):
|
|
||||||
warn = logging.LoggerAdapter.warning
|
|
||||||
|
|
||||||
def __init__(self, logger, project_name, version_string):
|
|
||||||
self.logger = logger
|
|
||||||
self.project = project_name
|
|
||||||
self.version = version_string
|
|
||||||
|
|
||||||
@property
|
|
||||||
def handlers(self):
|
|
||||||
return self.logger.handlers
|
|
||||||
|
|
||||||
def deprecated(self, msg, *args, **kwargs):
|
|
||||||
stdmsg = _("Deprecated: %s") % msg
|
|
||||||
if CONF.fatal_deprecations:
|
|
||||||
self.critical(stdmsg, *args, **kwargs)
|
|
||||||
raise DeprecatedConfig(msg=stdmsg)
|
|
||||||
else:
|
|
||||||
self.warn(stdmsg, *args, **kwargs)
|
|
||||||
|
|
||||||
def process(self, msg, kwargs):
|
|
||||||
if 'extra' not in kwargs:
|
|
||||||
kwargs['extra'] = {}
|
|
||||||
extra = kwargs['extra']
|
|
||||||
|
|
||||||
context = kwargs.pop('context', None)
|
|
||||||
if not context:
|
|
||||||
context = getattr(local.store, 'context', None)
|
|
||||||
if context:
|
|
||||||
extra.update(_dictify_context(context))
|
|
||||||
|
|
||||||
instance = kwargs.pop('instance', None)
|
|
||||||
instance_extra = ''
|
|
||||||
if instance:
|
|
||||||
instance_extra = CONF.instance_format % instance
|
|
||||||
else:
|
|
||||||
instance_uuid = kwargs.pop('instance_uuid', None)
|
|
||||||
if instance_uuid:
|
|
||||||
instance_extra = (CONF.instance_uuid_format
|
|
||||||
% {'uuid': instance_uuid})
|
|
||||||
extra.update({'instance': instance_extra})
|
|
||||||
|
|
||||||
extra.update({"project": self.project})
|
|
||||||
extra.update({"version": self.version})
|
|
||||||
extra['extra'] = extra.copy()
|
|
||||||
return msg, kwargs
|
|
||||||
|
|
||||||
|
|
||||||
class JSONFormatter(logging.Formatter):
|
|
||||||
def __init__(self, fmt=None, datefmt=None):
|
|
||||||
# NOTE(jkoelker) we ignore the fmt argument, but its still there
|
|
||||||
# since logging.config.fileConfig passes it.
|
|
||||||
self.datefmt = datefmt
|
|
||||||
|
|
||||||
def formatException(self, ei, strip_newlines=True):
|
|
||||||
lines = traceback.format_exception(*ei)
|
|
||||||
if strip_newlines:
|
|
||||||
lines = [itertools.ifilter(
|
|
||||||
lambda x: x,
|
|
||||||
line.rstrip().splitlines()) for line in lines]
|
|
||||||
lines = list(itertools.chain(*lines))
|
|
||||||
return lines
|
|
||||||
|
|
||||||
def format(self, record):
|
|
||||||
message = {'message': record.getMessage(),
|
|
||||||
'asctime': self.formatTime(record, self.datefmt),
|
|
||||||
'name': record.name,
|
|
||||||
'msg': record.msg,
|
|
||||||
'args': record.args,
|
|
||||||
'levelname': record.levelname,
|
|
||||||
'levelno': record.levelno,
|
|
||||||
'pathname': record.pathname,
|
|
||||||
'filename': record.filename,
|
|
||||||
'module': record.module,
|
|
||||||
'lineno': record.lineno,
|
|
||||||
'funcname': record.funcName,
|
|
||||||
'created': record.created,
|
|
||||||
'msecs': record.msecs,
|
|
||||||
'relative_created': record.relativeCreated,
|
|
||||||
'thread': record.thread,
|
|
||||||
'thread_name': record.threadName,
|
|
||||||
'process_name': record.processName,
|
|
||||||
'process': record.process,
|
|
||||||
'traceback': None}
|
|
||||||
|
|
||||||
if hasattr(record, 'extra'):
|
|
||||||
message['extra'] = record.extra
|
|
||||||
|
|
||||||
if record.exc_info:
|
|
||||||
message['traceback'] = self.formatException(record.exc_info)
|
|
||||||
|
|
||||||
return jsonutils.dumps(message)
|
|
||||||
|
|
||||||
|
|
||||||
def _create_logging_excepthook(product_name):
|
|
||||||
def logging_excepthook(type, value, tb):
|
|
||||||
extra = {}
|
|
||||||
if CONF.verbose:
|
|
||||||
extra['exc_info'] = (type, value, tb)
|
|
||||||
getLogger(product_name).critical(str(value), **extra)
|
|
||||||
return logging_excepthook
|
|
||||||
|
|
||||||
|
|
||||||
class LogConfigError(Exception):
|
|
||||||
|
|
||||||
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
|
|
||||||
|
|
||||||
def __init__(self, log_config, err_msg):
|
|
||||||
self.log_config = log_config
|
|
||||||
self.err_msg = err_msg
|
|
||||||
|
|
||||||
def __str__(self):
|
|
||||||
return self.message % dict(log_config=self.log_config,
|
|
||||||
err_msg=self.err_msg)
|
|
||||||
|
|
||||||
|
|
||||||
def _load_log_config(log_config):
|
|
||||||
try:
|
|
||||||
logging.config.fileConfig(log_config)
|
|
||||||
except moves.configparser.Error as exc:
|
|
||||||
raise LogConfigError(log_config, str(exc))
|
|
||||||
|
|
||||||
|
|
||||||
def setup(product_name):
|
|
||||||
"""Setup logging."""
|
|
||||||
if CONF.log_config:
|
|
||||||
_load_log_config(CONF.log_config)
|
|
||||||
else:
|
|
||||||
_setup_logging_from_conf()
|
|
||||||
sys.excepthook = _create_logging_excepthook(product_name)
|
|
||||||
|
|
||||||
|
|
||||||
def set_defaults(logging_context_format_string):
|
|
||||||
cfg.set_defaults(log_opts,
|
|
||||||
logging_context_format_string=
|
|
||||||
logging_context_format_string)
|
|
||||||
|
|
||||||
|
|
||||||
def _find_facility_from_conf():
|
|
||||||
facility_names = logging.handlers.SysLogHandler.facility_names
|
|
||||||
facility = getattr(logging.handlers.SysLogHandler,
|
|
||||||
CONF.syslog_log_facility,
|
|
||||||
None)
|
|
||||||
|
|
||||||
if facility is None and CONF.syslog_log_facility in facility_names:
|
|
||||||
facility = facility_names.get(CONF.syslog_log_facility)
|
|
||||||
|
|
||||||
if facility is None:
|
|
||||||
valid_facilities = facility_names.keys()
|
|
||||||
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
|
|
||||||
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
|
|
||||||
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
|
|
||||||
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
|
|
||||||
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
|
|
||||||
valid_facilities.extend(consts)
|
|
||||||
raise TypeError(_('syslog facility must be one of: %s') %
|
|
||||||
', '.join("'%s'" % fac
|
|
||||||
for fac in valid_facilities))
|
|
||||||
|
|
||||||
return facility
|
|
||||||
|
|
||||||
|
|
||||||
def _setup_logging_from_conf():
|
|
||||||
log_root = getLogger(None).logger
|
|
||||||
for handler in log_root.handlers:
|
|
||||||
log_root.removeHandler(handler)
|
|
||||||
|
|
||||||
if CONF.use_syslog:
|
|
||||||
facility = _find_facility_from_conf()
|
|
||||||
syslog = logging.handlers.SysLogHandler(address='/dev/log',
|
|
||||||
facility=facility)
|
|
||||||
log_root.addHandler(syslog)
|
|
||||||
|
|
||||||
logpath = _get_log_file_path()
|
|
||||||
if logpath:
|
|
||||||
filelog = logging.handlers.WatchedFileHandler(logpath)
|
|
||||||
log_root.addHandler(filelog)
|
|
||||||
|
|
||||||
if CONF.use_stderr:
|
|
||||||
streamlog = ColorHandler()
|
|
||||||
log_root.addHandler(streamlog)
|
|
||||||
|
|
||||||
elif not CONF.log_file:
|
|
||||||
# pass sys.stdout as a positional argument
|
|
||||||
# python2.6 calls the argument strm, in 2.7 it's stream
|
|
||||||
streamlog = logging.StreamHandler(sys.stdout)
|
|
||||||
log_root.addHandler(streamlog)
|
|
||||||
|
|
||||||
if CONF.publish_errors:
|
|
||||||
handler = importutils.import_object(
|
|
||||||
"taskflow.openstack.common.log_handler.PublishErrorsHandler",
|
|
||||||
logging.ERROR)
|
|
||||||
log_root.addHandler(handler)
|
|
||||||
|
|
||||||
datefmt = CONF.log_date_format
|
|
||||||
for handler in log_root.handlers:
|
|
||||||
# NOTE(alaski): CONF.log_format overrides everything currently. This
|
|
||||||
# should be deprecated in favor of context aware formatting.
|
|
||||||
if CONF.log_format:
|
|
||||||
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
|
|
||||||
datefmt=datefmt))
|
|
||||||
log_root.info('Deprecated: log_format is now deprecated and will '
|
|
||||||
'be removed in the next release')
|
|
||||||
else:
|
|
||||||
handler.setFormatter(ContextFormatter(datefmt=datefmt))
|
|
||||||
|
|
||||||
if CONF.debug:
|
|
||||||
log_root.setLevel(logging.DEBUG)
|
|
||||||
elif CONF.verbose:
|
|
||||||
log_root.setLevel(logging.INFO)
|
|
||||||
else:
|
|
||||||
log_root.setLevel(logging.WARNING)
|
|
||||||
|
|
||||||
for pair in CONF.default_log_levels:
|
|
||||||
mod, _sep, level_name = pair.partition('=')
|
|
||||||
level = logging.getLevelName(level_name)
|
|
||||||
logger = logging.getLogger(mod)
|
|
||||||
logger.setLevel(level)
|
|
||||||
|
|
||||||
_loggers = {}
|
|
||||||
|
|
||||||
|
|
||||||
def getLogger(name='unknown', version='unknown'):
|
|
||||||
if name not in _loggers:
|
|
||||||
_loggers[name] = ContextAdapter(logging.getLogger(name),
|
|
||||||
name,
|
|
||||||
version)
|
|
||||||
return _loggers[name]
|
|
||||||
|
|
||||||
|
|
||||||
def getLazyLogger(name='unknown', version='unknown'):
|
|
||||||
"""Returns lazy logger.
|
|
||||||
|
|
||||||
Creates a pass-through logger that does not create the real logger
|
|
||||||
until it is really needed and delegates all calls to the real logger
|
|
||||||
once it is created.
|
|
||||||
"""
|
|
||||||
return LazyAdapter(name, version)
|
|
||||||
|
|
||||||
|
|
||||||
class WritableLogger(object):
|
|
||||||
"""A thin wrapper that responds to `write` and logs."""
|
|
||||||
|
|
||||||
def __init__(self, logger, level=logging.INFO):
|
|
||||||
self.logger = logger
|
|
||||||
self.level = level
|
|
||||||
|
|
||||||
def write(self, msg):
|
|
||||||
self.logger.log(self.level, msg)
|
|
||||||
|
|
||||||
|
|
||||||
class ContextFormatter(logging.Formatter):
|
|
||||||
"""A context.RequestContext aware formatter configured through flags.
|
|
||||||
|
|
||||||
The flags used to set format strings are: logging_context_format_string
|
|
||||||
and logging_default_format_string. You can also specify
|
|
||||||
logging_debug_format_suffix to append extra formatting if the log level is
|
|
||||||
debug.
|
|
||||||
|
|
||||||
For information about what variables are available for the formatter see:
|
|
||||||
http://docs.python.org/library/logging.html#formatter
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
def format(self, record):
|
|
||||||
"""Uses contextstring if request_id is set, otherwise default."""
|
|
||||||
# NOTE(sdague): default the fancier formating params
|
|
||||||
# to an empty string so we don't throw an exception if
|
|
||||||
# they get used
|
|
||||||
for key in ('instance', 'color'):
|
|
||||||
if key not in record.__dict__:
|
|
||||||
record.__dict__[key] = ''
|
|
||||||
|
|
||||||
if record.__dict__.get('request_id', None):
|
|
||||||
self._fmt = CONF.logging_context_format_string
|
|
||||||
else:
|
|
||||||
self._fmt = CONF.logging_default_format_string
|
|
||||||
|
|
||||||
if (record.levelno == logging.DEBUG and
|
|
||||||
CONF.logging_debug_format_suffix):
|
|
||||||
self._fmt += " " + CONF.logging_debug_format_suffix
|
|
||||||
|
|
||||||
# Cache this on the record, Logger will respect our formated copy
|
|
||||||
if record.exc_info:
|
|
||||||
record.exc_text = self.formatException(record.exc_info, record)
|
|
||||||
return logging.Formatter.format(self, record)
|
|
||||||
|
|
||||||
def formatException(self, exc_info, record=None):
|
|
||||||
"""Format exception output with CONF.logging_exception_prefix."""
|
|
||||||
if not record:
|
|
||||||
return logging.Formatter.formatException(self, exc_info)
|
|
||||||
|
|
||||||
stringbuffer = moves.StringIO()
|
|
||||||
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
|
|
||||||
None, stringbuffer)
|
|
||||||
lines = stringbuffer.getvalue().split('\n')
|
|
||||||
stringbuffer.close()
|
|
||||||
|
|
||||||
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
|
|
||||||
record.asctime = self.formatTime(record, self.datefmt)
|
|
||||||
|
|
||||||
formatted_lines = []
|
|
||||||
for line in lines:
|
|
||||||
pl = CONF.logging_exception_prefix % record.__dict__
|
|
||||||
fl = '%s%s' % (pl, line)
|
|
||||||
formatted_lines.append(fl)
|
|
||||||
return '\n'.join(formatted_lines)
|
|
||||||
|
|
||||||
|
|
||||||
class ColorHandler(logging.StreamHandler):
|
|
||||||
LEVEL_COLORS = {
|
|
||||||
logging.DEBUG: '\033[00;32m', # GREEN
|
|
||||||
logging.INFO: '\033[00;36m', # CYAN
|
|
||||||
logging.AUDIT: '\033[01;36m', # BOLD CYAN
|
|
||||||
logging.WARN: '\033[01;33m', # BOLD YELLOW
|
|
||||||
logging.ERROR: '\033[01;31m', # BOLD RED
|
|
||||||
logging.CRITICAL: '\033[01;31m', # BOLD RED
|
|
||||||
}
|
|
||||||
|
|
||||||
def format(self, record):
|
|
||||||
record.color = self.LEVEL_COLORS[record.levelno]
|
|
||||||
return logging.StreamHandler.format(self, record)
|
|
||||||
|
|
||||||
|
|
||||||
class DeprecatedConfig(Exception):
|
|
||||||
message = _("Fatal call to deprecated config: %(msg)s")
|
|
||||||
|
|
||||||
def __init__(self, msg):
|
|
||||||
super(Exception, self).__init__(self.message % dict(msg=msg))
|
|
|
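With this module deleted, the library no longer needs oslo.config to drive logging; configuration falls back to the standard library. A minimal sketch of the stdlib-only split between library and application (the logger name, format string, and NullHandler arrangement are illustrative assumptions, not taskflow's actual code):

    import logging

    # Library side: take a module logger and stay silent unless the
    # application installs handlers (logging.NullHandler needs
    # Python >= 2.7, matching the fallback removed above).
    LOG = logging.getLogger('taskflow.example')
    LOG.addHandler(logging.NullHandler())

    def do_work():
        LOG.debug('doing work')

    # Application side: the app, not the library, picks level and format.
    if __name__ == '__main__':
        logging.basicConfig(
            level=logging.DEBUG,
            format='%(asctime)s %(process)d %(levelname)s %(name)s '
                   '%(message)s')
        do_work()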
@@ -21,6 +21,7 @@ Time related utilities and helper functions.
 
 import calendar
 import datetime
+import time
 
 import iso8601
 import six
@@ -90,6 +91,11 @@ def is_newer_than(after, seconds):
 
 
 def utcnow_ts():
     """Timestamp version of our utcnow function."""
+    if utcnow.override_time is None:
+        # NOTE(kgriffs): This is several times faster
+        # than going through calendar.timegm(...)
+        return int(time.time())
+
     return calendar.timegm(utcnow().timetuple())
 
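The override_time guard is what keeps test clocks honest: when a test freezes time through utcnow.override_time, the timestamp must still come from the frozen clock, so only the unpatched case takes the int(time.time()) shortcut. A quick sanity check that the two paths agree (illustrative only, not part of the commit):

    import calendar
    import time
    from datetime import datetime

    fast = int(time.time())
    slow = calendar.timegm(datetime.utcnow().timetuple())
    # Both yield whole-second UTC epoch timestamps; they can differ by at
    # most one second if the clock ticks between the two calls.
    assert abs(fast - slow) <= 1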
@@ -17,21 +17,26 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from sqlalchemy import Column, String
+from sqlalchemy import Column, String, DateTime
 from sqlalchemy.ext.declarative import declarative_base
 from sqlalchemy import ForeignKey
 from sqlalchemy.orm import backref
 from sqlalchemy.orm import relationship
 from sqlalchemy import types as types
 
-from taskflow.openstack.common.db.sqlalchemy import models as c_models
-
 from taskflow.openstack.common import jsonutils
+from taskflow.openstack.common import timeutils
 from taskflow.openstack.common import uuidutils
 
 BASE = declarative_base()
 
 
+# TODO(harlowja): remove when oslo.db exists
+class TimestampMixin(object):
+    created_at = Column(DateTime, default=timeutils.utcnow)
+    updated_at = Column(DateTime, onupdate=timeutils.utcnow)
+
+
 class Json(types.TypeDecorator, types.MutableType):
     impl = types.Text
@@ -42,7 +47,7 @@ class Json(types.TypeDecorator, types.MutableType):
         return jsonutils.loads(value)
 
 
-class ModelBase(c_models.ModelBase, c_models.TimestampMixin):
+class ModelBase(TimestampMixin):
     """Base model for all taskflow objects"""
     uuid = Column(String, default=uuidutils.generate_uuid,
                   primary_key=True, nullable=False, unique=True)
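The local TimestampMixin reproduces the behavior previously inherited from c_models.TimestampMixin: created_at is stamped on insert and updated_at is refreshed on any later update. A self-contained sketch of the pattern (the Example table, the sqlite engine, and the use of datetime.utcnow in place of timeutils.utcnow are assumptions for illustration):

    import datetime

    from sqlalchemy import Column, DateTime, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class TimestampMixin(object):
        # Same shape as the mixin added above: filled by the column
        # default on insert, refreshed automatically on update.
        created_at = Column(DateTime, default=datetime.datetime.utcnow)
        updated_at = Column(DateTime, onupdate=datetime.datetime.utcnow)

    class Example(TimestampMixin, Base):
        __tablename__ = 'examples'
        name = Column(String(255), primary_key=True)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(Example(name='demo'))
    session.commit()  # created_at is now set by the column default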
@@ -202,6 +202,7 @@ class Fedora(Distro):
         RHEL: https://bugzilla.redhat.com/958868
         """
 
+        if os.path.exists('contrib/redhat-eventlet.patch'):
             # Install "patch" program if it's not there
             if not self.check_pkg('patch'):
                 self.die("Please install 'patch'.")
@@ -1,12 +1,6 @@
 # Packages needed for using this library.
 anyjson>=0.2.4
-eventlet>=0.9.17
 iso8601
-netaddr>=0.7.6
-# Need currently the newest version of oslo.config for the DeprecatedOpt
-# used inside oslo database layer.
--f http://tarballs.openstack.org/oslo.config/oslo.config-1.2.0a3.tar.gz#egg=oslo.config-1.2.0a3
-oslo.config>=1.2.0a3
 six
 # Only needed if database backend used.
 sqlalchemy>=0.7,<=0.7.99