diff --git a/ceilometer/openstack/common/db/__init__.py b/ceilometer/openstack/common/db/__init__.py new file mode 100644 index 0000000000..1b9b60dec1 --- /dev/null +++ b/ceilometer/openstack/common/db/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Cloudscaling Group, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/ceilometer/openstack/common/db/api.py b/ceilometer/openstack/common/db/api.py new file mode 100644 index 0000000000..c3f1e2c6f3 --- /dev/null +++ b/ceilometer/openstack/common/db/api.py @@ -0,0 +1,101 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Multiple DB API backend support. + +Supported configuration options: + +`db_backend`: DB backend name or full module path to DB backend module. +`dbapi_use_tpool`: Enable thread pooling of DB API calls. + +A DB backend module should implement a method named 'get_backend' which +takes no arguments. The method can return any object that implements DB +API methods. + +*NOTE*: There are bugs in eventlet when using tpool combined with +threading locks. The python logging module happens to use such locks. To +work around this issue, be sure to specify thread=False with +eventlet.monkey_patch(). + +A bug for eventlet has been filed here: + +https://bitbucket.org/eventlet/eventlet/issue/137/ +""" +import functools + +from oslo.config import cfg + +from ceilometer.openstack.common import importutils +from ceilometer.openstack.common import lockutils + + +db_opts = [ + cfg.StrOpt('db_backend', + default='sqlalchemy', + help='The backend to use for db'), + cfg.BoolOpt('dbapi_use_tpool', + default=False, + help='Enable the experimental use of thread pooling for ' + 'all DB API calls') +] + +CONF = cfg.CONF +CONF.register_opts(db_opts) + + +class DBAPI(object): + def __init__(self, backend_mapping=None): + if backend_mapping is None: + backend_mapping = {} + self.__backend = None + self.__backend_mapping = backend_mapping + + @lockutils.synchronized('dbapi_backend', 'ceilometer-') + def __get_backend(self): + """Get the actual backend. May be a module or an instance of + a class. Doesn't matter to us. We do this synchronized as it's + possible multiple greenthreads started very quickly trying to do + DB calls and eventlet can switch threads before self.__backend gets + assigned. 
+ """ + if self.__backend: + # Another thread assigned it + return self.__backend + backend_name = CONF.db_backend + self.__use_tpool = CONF.dbapi_use_tpool + if self.__use_tpool: + from eventlet import tpool + self.__tpool = tpool + # Import the untranslated name if we don't have a + # mapping. + backend_path = self.__backend_mapping.get(backend_name, + backend_name) + backend_mod = importutils.import_module(backend_path) + self.__backend = backend_mod.get_backend() + return self.__backend + + def __getattr__(self, key): + backend = self.__backend or self.__get_backend() + attr = getattr(backend, key) + if not self.__use_tpool or not hasattr(attr, '__call__'): + return attr + + def tpool_wrapper(*args, **kwargs): + return self.__tpool.execute(attr, *args, **kwargs) + + functools.update_wrapper(tpool_wrapper, attr) + return tpool_wrapper diff --git a/ceilometer/openstack/common/db/exception.py b/ceilometer/openstack/common/db/exception.py new file mode 100644 index 0000000000..50ce8d21c5 --- /dev/null +++ b/ceilometer/openstack/common/db/exception.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""DB related custom exceptions.""" + +from ceilometer.openstack.common.gettextutils import _ + + +class DBError(Exception): + """Wraps an implementation specific exception.""" + def __init__(self, inner_exception=None): + self.inner_exception = inner_exception + super(DBError, self).__init__(str(inner_exception)) + + +class DBDuplicateEntry(DBError): + """Wraps an implementation specific exception.""" + def __init__(self, columns=[], inner_exception=None): + self.columns = columns + super(DBDuplicateEntry, self).__init__(inner_exception) + + +class DBDeadlock(DBError): + def __init__(self, inner_exception=None): + super(DBDeadlock, self).__init__(inner_exception) + + +class DBInvalidUnicodeParameter(Exception): + message = _("Invalid Parameter: " + "Unicode is not supported by the current database.") diff --git a/ceilometer/openstack/common/db/sqlalchemy/__init__.py b/ceilometer/openstack/common/db/sqlalchemy/__init__.py new file mode 100644 index 0000000000..1b9b60dec1 --- /dev/null +++ b/ceilometer/openstack/common/db/sqlalchemy/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Cloudscaling Group, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/ceilometer/openstack/common/db/sqlalchemy/models.py b/ceilometer/openstack/common/db/sqlalchemy/models.py new file mode 100644 index 0000000000..8e3d333fe4 --- /dev/null +++ b/ceilometer/openstack/common/db/sqlalchemy/models.py @@ -0,0 +1,103 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Piston Cloud Computing, Inc. +# Copyright 2012 Cloudscaling Group, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +SQLAlchemy models. +""" + +from sqlalchemy import Column, Integer +from sqlalchemy import DateTime +from sqlalchemy.orm import object_mapper + +from ceilometer.openstack.common.db.sqlalchemy.session import get_session +from ceilometer.openstack.common import timeutils + + +class ModelBase(object): + """Base class for models.""" + __table_initialized__ = False + created_at = Column(DateTime, default=timeutils.utcnow) + updated_at = Column(DateTime, onupdate=timeutils.utcnow) + metadata = None + + def save(self, session=None): + """Save this object.""" + if not session: + session = get_session() + # NOTE(boris-42): This part of the code should look like: + # session.add(self) + # session.flush() + # But there is a bug in sqlalchemy and eventlet that + # raises NoneType exception if there is no running + # transaction and rollback is called. As long as + # sqlalchemy has this bug we have to create the + # transaction explicitly. + with session.begin(subtransactions=True): + session.add(self) + session.flush() + + def __setitem__(self, key, value): + setattr(self, key, value) + + def __getitem__(self, key): + return getattr(self, key) + + def get(self, key, default=None): + return getattr(self, key, default) + + def __iter__(self): + columns = dict(object_mapper(self).columns).keys() + # NOTE(russellb): Allow models to specify other keys that can be looked + # up, beyond the actual db columns. An example would be the 'name' + # property for an Instance. + if hasattr(self, '_extra_keys'): + columns.extend(self._extra_keys()) + self._i = iter(columns) + return self + + def next(self): + n = self._i.next() + return n, getattr(self, n) + + def update(self, values): + """Make the model object behave like a dict.""" + for k, v in values.iteritems(): + setattr(self, k, v) + + def iteritems(self): + """Make the model object behave like a dict. 
+ +    Includes attributes from joins.""" + local = dict(self) + joined = dict([(k, v) for k, v in self.__dict__.iteritems() + if not k[0] == '_']) + local.update(joined) + return local.iteritems() + + +class SoftDeleteMixin(object): + deleted_at = Column(DateTime) + deleted = Column(Integer, default=0) + + def soft_delete(self, session=None): + """Mark this object as deleted.""" + self.deleted = self.id + self.deleted_at = timeutils.utcnow() + self.save(session=session) diff --git a/ceilometer/openstack/common/db/sqlalchemy/session.py b/ceilometer/openstack/common/db/sqlalchemy/session.py new file mode 100644 index 0000000000..60e784aca7 --- /dev/null +++ b/ceilometer/openstack/common/db/sqlalchemy/session.py @@ -0,0 +1,644 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Session Handling for SQLAlchemy backend. + +Initializing: + +* Call set_defaults with the minimum of the following kwargs: + sql_connection, sqlite_db + + Example: + + session.set_defaults( + sql_connection="sqlite:///var/lib/ceilometer/sqlite.db", + sqlite_db="/var/lib/ceilometer/sqlite.db") + +Recommended ways to use sessions within this framework: + +* Don't use them explicitly; this is like running with AUTOCOMMIT=1. + model_query() will implicitly use a session when called without one + supplied. This is the ideal situation because it will allow queries + to be automatically retried if the database connection is interrupted. + + Note: Automatic retry will be enabled in a future patch. + + It is generally fine to issue several queries in a row like this. Even though + they may be run in separate transactions and/or separate sessions, each one + will see the data from the prior calls. If needed, undo- or rollback-like + functionality should be handled at a logical level. For an example, look at + the code around quotas and reservation_rollback(). + + Examples: + + def get_foo(context, foo): + return model_query(context, models.Foo).\ + filter_by(foo=foo).\ + first() + + def update_foo(context, id, newfoo): + model_query(context, models.Foo).\ + filter_by(id=id).\ + update({'foo': newfoo}) + + def create_foo(context, values): + foo_ref = models.Foo() + foo_ref.update(values) + foo_ref.save() + return foo_ref + + +* Within the scope of a single method, keeping all the reads and writes within + the context managed by a single session. In this way, the session's __exit__ + handler will take care of calling flush() and commit() for you. + If using this approach, you should not explicitly call flush() or commit(). + Any error within the context of the session will cause the session to emit + a ROLLBACK. If the connection is dropped before this is possible, the + database will implicitly roll back the transaction. + + Note: statements in the session scope will not be automatically retried. 
+ + If you create models within the session, they need to be added, but you + do not need to call model.save() + + def create_many_foo(context, foos): + session = get_session() + with session.begin(): + for foo in foos: + foo_ref = models.Foo() + foo_ref.update(foo) + session.add(foo_ref) + + def update_bar(context, foo_id, newbar): + session = get_session() + with session.begin(): + foo_ref = model_query(context, models.Foo, session).\ + filter_by(id=foo_id).\ + first() + model_query(context, models.Bar, session).\ + filter_by(id=foo_ref['bar_id']).\ + update({'bar': newbar}) + + Note: update_bar is a trivially simple example of using "with session.begin". + create_many_foo is a good example of when a transaction is needed; even so, + it is always best to use as few queries as possible. The two queries in + update_bar can be better expressed using a single query which avoids + the need for an explicit transaction. It can be expressed like so: + + def update_bar(context, foo_id, newbar): + subq = model_query(context, models.Foo.bar_id).\ + filter_by(id=foo_id).\ + limit(1).\ + subquery() + model_query(context, models.Bar).\ + filter_by(id=subq.as_scalar()).\ + update({'bar': newbar}) + + For reference, this emits approximately the following SQL statement: + + UPDATE bar SET bar = ${newbar} + WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1); + +* Passing an active session between methods. Sessions should only be passed + to private methods. The private method must use a subtransaction; otherwise + SQLAlchemy will throw an error when you call session.begin() on an existing + transaction. Public methods should not accept a session parameter and should + not be involved in sessions within the caller's scope. + + Note that this incurs more overhead in SQLAlchemy than the above means + due to nesting transactions, and it is not possible to implicitly retry + failed database operations when using this approach. + + This also makes code somewhat more difficult to read and debug, because a + single database transaction spans more than one method. Error handling + becomes less clear in this situation. When this is needed for code clarity, + it should be clearly documented. + + def myfunc(foo): + session = get_session() + with session.begin(): + # do some database things + bar = _private_func(foo, session) + return bar + + def _private_func(foo, session=None): + if not session: + session = get_session() + with session.begin(subtransactions=True): + # do some other database things + return bar + + +There are some things which it is best to avoid: + +* Don't keep a transaction open any longer than necessary. + + This means that your "with session.begin()" block should be as short + as possible, while still containing all the related calls for that + transaction. + +* Avoid "with_lockmode('UPDATE')" when possible. + + In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match + any rows, it will take a gap-lock. This is a form of write-lock on the + "gap" where no rows exist, and prevents any other writes to that space. + This can effectively prevent any INSERT into a table by locking the gap + at the end of the index. Similar problems will occur if the SELECT FOR UPDATE + has an overly broad WHERE clause, or doesn't properly use an index. + + One idea proposed at ODS Fall '12 was to use a normal SELECT to test the + number of rows matching a query, and if only one row is returned, + then issue the SELECT FOR UPDATE. + + The better long-term solution is to use INSERT ..
ON DUPLICATE KEY UPDATE. + However, this cannot be done until the "deleted" columns are removed and + proper UNIQUE constraints are added to the tables. + + +Enabling soft deletes: + +* To use/enable soft-deletes, the SoftDeleteMixin must be added + to your model class. For example: + + class NovaBase(models.SoftDeleteMixin, models.ModelBase): + pass + + +Efficient use of soft deletes: + +* There are two possible ways to mark a record as deleted: + model.soft_delete() and query.soft_delete(). + + The model.soft_delete() method works with a single, already-fetched entry. + query.soft_delete() issues only one db request for all entries that + correspond to the query. + +* In almost all cases you should use query.soft_delete(). Some examples: + + def soft_delete_bar(): + count = model_query(BarModel).find(some_condition).soft_delete() + if count == 0: + raise Exception("0 entries were soft deleted") + + def complex_soft_delete_with_synchronization_bar(session=None): + if session is None: + session = get_session() + with session.begin(subtransactions=True): + count = model_query(BarModel).\ + find(some_condition).\ + soft_delete(synchronize_session=True) + # Here synchronize_session is required, because we + # don't know what is going on in outer session. + if count == 0: + raise Exception("0 entries were soft deleted") + +* There is only one situation where model.soft_delete() is appropriate: when + you fetch a single record, work with it, and mark it as deleted in the same + transaction. + + def soft_delete_bar_model(): + session = get_session() + with session.begin(): + bar_ref = model_query(BarModel).find(some_condition).first() + # Work with bar_ref + bar_ref.soft_delete(session=session) + + However, if you need to work with all entries that correspond to the query + and then soft delete them, you should use the query.soft_delete() method: + + def soft_delete_multi_models(): + session = get_session() + with session.begin(): + query = model_query(BarModel, session=session).\ + find(some_condition) + model_refs = query.all() + # Work with model_refs + query.soft_delete(synchronize_session=False) + # synchronize_session=False should be set if there is no outer + # session and these entries are not used after this. + + When working with many rows, it is very important to use query.soft_delete, + which issues a single query. Using model.soft_delete(), as in the following + example, is very inefficient. + + for bar_ref in bar_refs: + bar_ref.soft_delete(session=session) + # This will produce count(bar_refs) db requests. 
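+ +For reference, the query.soft_delete() calls above expand to approximately the +following SQL (a sketch; the bar table, its integer primary key id, and +some_condition are assumptions carried over from the examples, not part of +this module): + +    UPDATE bar SET deleted = id, updated_at = updated_at, +        deleted_at = <current UTC time> +    WHERE <some_condition>;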
+""" + +import os.path +import re +import time + +from eventlet import greenthread +from oslo.config import cfg +from sqlalchemy import exc as sqla_exc +import sqlalchemy.interfaces +import sqlalchemy.orm +from sqlalchemy.pool import NullPool, StaticPool +from sqlalchemy.sql.expression import literal_column + +from ceilometer.openstack.common.db import exception +from ceilometer.openstack.common import log as logging +from ceilometer.openstack.common.gettextutils import _ +from ceilometer.openstack.common import timeutils + + +sql_opts = [ + cfg.StrOpt('sql_connection', + default='sqlite:///' + + os.path.abspath(os.path.join(os.path.dirname(__file__), + '../', '$sqlite_db')), + help='The SQLAlchemy connection string used to connect to the ' + 'database', + secret=True), + cfg.StrOpt('sqlite_db', + default='ceilometer.sqlite', + help='the filename to use with sqlite'), + cfg.IntOpt('sql_idle_timeout', + default=3600, + help='timeout before idle sql connections are reaped'), + cfg.BoolOpt('sqlite_synchronous', + default=True, + help='If passed, use synchronous mode for sqlite'), + cfg.IntOpt('sql_min_pool_size', + default=1, + help='Minimum number of SQL connections to keep open in a ' + 'pool'), + cfg.IntOpt('sql_max_pool_size', + default=5, + help='Maximum number of SQL connections to keep open in a ' + 'pool'), + cfg.IntOpt('sql_max_retries', + default=10, + help='maximum db connection retries during startup. ' + '(setting -1 implies an infinite retry count)'), + cfg.IntOpt('sql_retry_interval', + default=10, + help='interval between retries of opening a sql connection'), + cfg.IntOpt('sql_max_overflow', + default=None, + help='If set, use this value for max_overflow with sqlalchemy'), + cfg.IntOpt('sql_connection_debug', + default=0, + help='Verbosity of SQL debugging information. 
0=None, ' + '100=Everything'), + cfg.BoolOpt('sql_connection_trace', + default=False, + help='Add python stack traces to SQL as comment strings'), +] + +CONF = cfg.CONF +CONF.register_opts(sql_opts) +LOG = logging.getLogger(__name__) + +_ENGINE = None +_MAKER = None + + +def set_defaults(sql_connection, sqlite_db): + """Set defaults for configuration variables.""" + cfg.set_defaults(sql_opts, + sql_connection=sql_connection, + sqlite_db=sqlite_db) + + +def get_session(autocommit=True, expire_on_commit=False): + """Return a SQLAlchemy session.""" + global _MAKER + + if _MAKER is None: + engine = get_engine() + _MAKER = get_maker(engine, autocommit, expire_on_commit) + + session = _MAKER() + return session + + +# note(boris-42): In current versions of DB backends unique constraint +# violation messages follow the structure: +# +# sqlite: +# 1 column - (IntegrityError) column c1 is not unique +# N columns - (IntegrityError) column c1, c2, ..., N are not unique +# +# postgres: +# 1 column - (IntegrityError) duplicate key value violates unique +# constraint "users_c1_key" +# N columns - (IntegrityError) duplicate key value violates unique +# constraint "name_of_our_constraint" +# +# mysql: +# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key +# 'c1'") +# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined +# with -' for key 'name_of_our_constraint'") +_DUP_KEY_RE_DB = { + "sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"), + "postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"), + "mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$") +} + + +def raise_if_duplicate_entry_error(integrity_error, engine_name): + """ + Raise a DBDuplicateEntry exception if the integrity error wraps a + unique constraint violation. + """ + + def get_columns_from_uniq_cons_or_name(columns): + # note(boris-42): UniqueConstraint name convention: "uniq_c1_x_c2_x_c3" + # means that columns c1, c2, c3 are in UniqueConstraint. + uniqbase = "uniq_" + if not columns.startswith(uniqbase): + if engine_name == "postgresql": + return [columns[columns.index("_") + 1:columns.rindex("_")]] + return [columns] + return columns[len(uniqbase):].split("_x_") + + if engine_name not in ["mysql", "sqlite", "postgresql"]: + return + + m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message) + if not m: + return + columns = m.group(1) + + if engine_name == "sqlite": + columns = columns.strip().split(", ") + else: + columns = get_columns_from_uniq_cons_or_name(columns) + raise exception.DBDuplicateEntry(columns, integrity_error) + + +# NOTE(comstud): In current versions of DB backends, Deadlock violation +# messages follow the structure: +# +# mysql: +# (OperationalError) (1213, 'Deadlock found when trying to get lock; try ' +# 'restarting transaction') +_DEADLOCK_RE_DB = { + "mysql": re.compile(r"^.*\(1213, 'Deadlock.*") +} + + +def raise_if_deadlock_error(operational_error, engine_name): + """ + Raise a DBDeadlock exception if the OperationalError contains a Deadlock + condition. + """ + re = _DEADLOCK_RE_DB.get(engine_name) + if re is None: + return + m = re.match(operational_error.message) + if not m: + return + raise exception.DBDeadlock(operational_error) + + +def wrap_db_error(f): + def _wrap(*args, **kwargs): + try: + return f(*args, **kwargs) + except UnicodeEncodeError: + raise exception.DBInvalidUnicodeParameter() + # note(boris-42): We should catch unique constraint violation and + # wrap it in our own DBDuplicateEntry exception. 
A unique constraint + # violation is wrapped in an IntegrityError. + except sqla_exc.OperationalError, e: + raise_if_deadlock_error(e, get_engine().name) + # NOTE(comstud): A lot of code is checking for OperationalError + # so let's not wrap it for now. + raise + except sqla_exc.IntegrityError, e: + # note(boris-42): SqlAlchemy doesn't unify errors from different + # DBs, so we must do this. Also, some tables (for example + # instance_types) have more than one unique constraint. This + # means we should get the names of the columns whose values + # violate the unique constraint from the error message. + raise_if_duplicate_entry_error(e, get_engine().name) + raise exception.DBError(e) + except Exception, e: + LOG.exception(_('DB exception wrapped.')) + raise exception.DBError(e) + _wrap.func_name = f.func_name + return _wrap + + +def get_engine(): + """Return a SQLAlchemy engine.""" + global _ENGINE + if _ENGINE is None: + _ENGINE = create_engine(CONF.sql_connection) + return _ENGINE + + +def synchronous_switch_listener(dbapi_conn, connection_rec): + """Switch sqlite connections to non-synchronous mode.""" + dbapi_conn.execute("PRAGMA synchronous = OFF") + + +def add_regexp_listener(dbapi_con, con_record): + """Add REGEXP function to sqlite connections.""" + + def regexp(expr, item): + reg = re.compile(expr) + return reg.search(unicode(item)) is not None + dbapi_con.create_function('regexp', 2, regexp) + + +def greenthread_yield(dbapi_con, con_record): + """ + Ensure other greenthreads get a chance to execute by forcing a context + switch. With common database backends (eg MySQLdb and sqlite), there is + no implicit yield caused by network I/O since they are implemented by + C libraries that eventlet cannot monkey patch. + """ + greenthread.sleep(0) + + +def ping_listener(dbapi_conn, connection_rec, connection_proxy): + """ + Ensures that MySQL connections checked out of the + pool are alive. + + Borrowed from: + http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f + """ + try: + dbapi_conn.cursor().execute('select 1') + except dbapi_conn.OperationalError, ex: + if ex.args[0] in (2006, 2013, 2014, 2045, 2055): + LOG.warn(_('Got mysql server has gone away: %s'), ex) + raise sqla_exc.DisconnectionError("Database server went away") + else: + raise + + +def is_db_connection_error(args): + """Return True if error in connecting to db.""" + # NOTE(adam_g): This is currently MySQL specific and needs to be extended + # to support Postgres and others. 
+ conn_err_codes = ('2002', '2003', '2006') + for err_code in conn_err_codes: + if args.find(err_code) != -1: + return True + return False + + +def create_engine(sql_connection): + """Return a new SQLAlchemy engine.""" + connection_dict = sqlalchemy.engine.url.make_url(sql_connection) + + engine_args = { + "pool_recycle": CONF.sql_idle_timeout, + "echo": False, + 'convert_unicode': True, + } + + # Map our SQL debug level to SQLAlchemy's options + if CONF.sql_connection_debug >= 100: + engine_args['echo'] = 'debug' + elif CONF.sql_connection_debug >= 50: + engine_args['echo'] = True + + if "sqlite" in connection_dict.drivername: + engine_args["poolclass"] = NullPool + + if CONF.sql_connection == "sqlite://": + engine_args["poolclass"] = StaticPool + engine_args["connect_args"] = {'check_same_thread': False} + else: + engine_args['pool_size'] = CONF.sql_max_pool_size + if CONF.sql_max_overflow is not None: + engine_args['max_overflow'] = CONF.sql_max_overflow + + engine = sqlalchemy.create_engine(sql_connection, **engine_args) + + sqlalchemy.event.listen(engine, 'checkin', greenthread_yield) + + if 'mysql' in connection_dict.drivername: + sqlalchemy.event.listen(engine, 'checkout', ping_listener) + elif 'sqlite' in connection_dict.drivername: + if not CONF.sqlite_synchronous: + sqlalchemy.event.listen(engine, 'connect', + synchronous_switch_listener) + sqlalchemy.event.listen(engine, 'connect', add_regexp_listener) + + if (CONF.sql_connection_trace and + engine.dialect.dbapi.__name__ == 'MySQLdb'): + patch_mysqldb_with_stacktrace_comments() + + try: + engine.connect() + except sqla_exc.OperationalError, e: + if not is_db_connection_error(e.args[0]): + raise + + remaining = CONF.sql_max_retries + if remaining == -1: + remaining = 'infinite' + while True: + msg = _('SQL connection failed. %s attempts left.') + LOG.warn(msg % remaining) + if remaining != 'infinite': + remaining -= 1 + time.sleep(CONF.sql_retry_interval) + try: + engine.connect() + break + except sqla_exc.OperationalError, e: + if (remaining != 'infinite' and remaining == 0) or \ + not is_db_connection_error(e.args[0]): + raise + return engine + + +class Query(sqlalchemy.orm.query.Query): + """Subclass of sqlalchemy.query with soft_delete() method.""" + def soft_delete(self, synchronize_session='evaluate'): + return self.update({'deleted': literal_column('id'), + 'updated_at': literal_column('updated_at'), + 'deleted_at': timeutils.utcnow()}, + synchronize_session=synchronize_session) + + +class Session(sqlalchemy.orm.session.Session): + """Custom Session class to avoid SqlAlchemy Session monkey patching.""" + @wrap_db_error + def query(self, *args, **kwargs): + return super(Session, self).query(*args, **kwargs) + + @wrap_db_error + def flush(self, *args, **kwargs): + return super(Session, self).flush(*args, **kwargs) + + @wrap_db_error + def execute(self, *args, **kwargs): + return super(Session, self).execute(*args, **kwargs) + + +def get_maker(engine, autocommit=True, expire_on_commit=False): + """Return a SQLAlchemy sessionmaker using the given engine.""" + return sqlalchemy.orm.sessionmaker(bind=engine, + class_=Session, + autocommit=autocommit, + expire_on_commit=expire_on_commit, + query_cls=Query) + + +def patch_mysqldb_with_stacktrace_comments(): + """Adds current stack trace as a comment in queries by patching + MySQLdb.cursors.BaseCursor._do_query. 
+ """ + import MySQLdb.cursors + import traceback + + old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query + + def _do_query(self, q): + stack = '' + for file, line, method, function in traceback.extract_stack(): + # exclude various common things from trace + if file.endswith('session.py') and method == '_do_query': + continue + if file.endswith('api.py') and method == 'wrapper': + continue + if file.endswith('utils.py') and method == '_inner': + continue + if file.endswith('exception.py') and method == '_wrap': + continue + # db/api is just a wrapper around db/sqlalchemy/api + if file.endswith('db/api.py'): + continue + # only trace inside ceilometer + index = file.rfind('ceilometer') + if index == -1: + continue + stack += "File:%s:%s Method:%s() Line:%s | " \ + % (file[index:], line, method, function) + + # strip trailing " | " from stack + if stack: + stack = stack[:-3] + qq = "%s /* %s */" % (q, stack) + else: + qq = q + old_mysql_do_query(self, qq) + + setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query) diff --git a/ceilometer/openstack/common/db/sqlalchemy/utils.py b/ceilometer/openstack/common/db/sqlalchemy/utils.py new file mode 100644 index 0000000000..048c009066 --- /dev/null +++ b/ceilometer/openstack/common/db/sqlalchemy/utils.py @@ -0,0 +1,132 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010-2011 OpenStack Foundation. +# Copyright 2012 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of paginate query.""" + +import sqlalchemy + +from ceilometer.openstack.common.gettextutils import _ +from ceilometer.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class InvalidSortKey(Exception): + message = _("Sort key supplied was not valid.") + + +# copy from glance/db/sqlalchemy/api.py +def paginate_query(query, model, limit, sort_keys, marker=None, + sort_dir=None, sort_dirs=None): + """Returns a query with sorting / pagination criteria added. + + Pagination works by requiring a unique sort_key, specified by sort_keys. + (If sort_keys is not unique, then we risk looping through values.) + We use the last row in the previous page as the 'marker' for pagination. + So we must return values that follow the passed marker in the order. + With a single-valued sort_key, this would be easy: sort_key > X. + With a compound-values sort_key, (k1, k2, k3) we must do this to repeat + the lexicographical ordering: + (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) + + We also have to cope with different sort_directions. + + Typically, the id of the last row is used as the client-facing pagination + marker, then the actual marker object must be fetched from the db and + passed in to us as marker. 
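+ +    A sketch of a typical call, assuming the client sent the id of the last +    row it saw as marker_id (the models.Foo query shown here is a +    placeholder, not part of this module): + +        marker = None +        if marker_id is not None: +            marker = query.get(marker_id) +        query = paginate_query(query, models.Foo, 20, +                               ['created_at', 'id'], +                               marker=marker, sort_dir='asc')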
+ +    :param query: the query object to which we should add paging/sorting +    :param model: the ORM model class +    :param limit: maximum number of items to return +    :param sort_keys: array of attributes by which results should be sorted +    :param marker: the last item of the previous page; we return the next +                    results after this value. +    :param sort_dir: direction in which results should be sorted (asc, desc) +    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys + +    :rtype: sqlalchemy.orm.query.Query +    :return: The query with sorting/pagination added. +    """ + +    if 'id' not in sort_keys: +        # TODO(justinsb): If this ever gives a false-positive, check +        # the actual primary key, rather than assuming it is 'id' +        LOG.warn(_('Id not in sort_keys; is sort_keys unique?')) + +    assert(not (sort_dir and sort_dirs)) + +    # Default the sort direction to ascending +    if sort_dirs is None and sort_dir is None: +        sort_dir = 'asc' + +    # Ensure a per-column sort direction +    if sort_dirs is None: +        sort_dirs = [sort_dir for _sort_key in sort_keys] + +    assert(len(sort_dirs) == len(sort_keys)) + +    # Add sorting +    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): +        sort_dir_func = { +            'asc': sqlalchemy.asc, +            'desc': sqlalchemy.desc, +        }[current_sort_dir] + +        try: +            sort_key_attr = getattr(model, current_sort_key) +        except AttributeError: +            raise InvalidSortKey() +        query = query.order_by(sort_dir_func(sort_key_attr)) + +    # Add pagination +    if marker is not None: +        marker_values = [] +        for sort_key in sort_keys: +            v = getattr(marker, sort_key) +            marker_values.append(v) + +        # Build up an array of sort criteria as in the docstring +        criteria_list = [] +        for i in xrange(0, len(sort_keys)): +            crit_attrs = [] +            for j in xrange(0, i): +                model_attr = getattr(model, sort_keys[j]) +                crit_attrs.append((model_attr == marker_values[j])) + +            model_attr = getattr(model, sort_keys[i]) +            if sort_dirs[i] == 'desc': +                crit_attrs.append((model_attr < marker_values[i])) +            elif sort_dirs[i] == 'asc': +                crit_attrs.append((model_attr > marker_values[i])) +            else: +                raise ValueError(_("Unknown sort direction, " +                                   "must be 'desc' or 'asc'")) + +            criteria = sqlalchemy.sql.and_(*crit_attrs) +            criteria_list.append(criteria) + +        f = sqlalchemy.sql.or_(*criteria_list) +        query = query.filter(f) + +    if limit is not None: +        query = query.limit(limit) + +    return query diff --git a/ceilometer/storage/impl_sqlalchemy.py b/ceilometer/storage/impl_sqlalchemy.py index fe310fa6c7..224a37b87e 100644 --- a/ceilometer/storage/impl_sqlalchemy.py +++ b/ceilometer/storage/impl_sqlalchemy.py @@ -26,12 +26,12 @@ from sqlalchemy.orm import exc  from ceilometer.openstack.common import log from ceilometer.openstack.common import timeutils +import ceilometer.openstack.common.db.sqlalchemy.session as sqlalchemy_session from ceilometer.storage import base from ceilometer.storage import models as api_models from ceilometer.storage.sqlalchemy import migration from ceilometer.storage.sqlalchemy.models import Meter, Project, Resource from ceilometer.storage.sqlalchemy.models import Source, User, Base, Alarm -import ceilometer.storage.sqlalchemy.session as sqlalchemy_session  LOG = log.getLogger(__name__) diff --git a/ceilometer/storage/sqlalchemy/session.py b/ceilometer/storage/sqlalchemy/session.py deleted file mode 100644 index ce95e12eb5..0000000000 --- a/ceilometer/storage/sqlalchemy/session.py +++ /dev/null @@ -1,193 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by 
the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Session Handling for SQLAlchemy backend.""" - -import re -import time - -from oslo.config import cfg -import sqlalchemy -import sqlalchemy.exc as exc -import sqlalchemy.orm -import sqlalchemy.pool as pool - -from ceilometer.openstack.common import log - - -LOG = log.getLogger(__name__) - -_MAKER = None -_ENGINE = None - -sql_opts = [ - cfg.IntOpt('sql_connection_debug', - default=0, - help='Verbosity of SQL debugging information. 0=None, ' - '100=Everything'), - cfg.BoolOpt('sql_connection_trace', - default=False, - help='Add python stack traces to SQL as comment strings'), - cfg.BoolOpt('sqlite_synchronous', - default=True, - help='If passed, use synchronous mode for sqlite'), - cfg.IntOpt('sql_idle_timeout', - default=3600, - help='timeout before idle sql connections are reaped'), - cfg.IntOpt('sql_max_retries', - default=10, - help='maximum db connection retries during startup. ' - '(setting -1 implies an infinite retry count)'), - cfg.IntOpt('sql_retry_interval', - default=10, - help='interval between retries of opening a sql connection'), -] - -cfg.CONF.register_opts(sql_opts) - - -def get_session(database_connection, conf, - autocommit=True, expire_on_commit=False, autoflush=True): - """Return a SQLAlchemy session.""" - global _MAKER - - if _MAKER is None: - engine = get_engine(database_connection, conf) - _MAKER = get_maker(engine, autocommit, expire_on_commit, autoflush) - - session = _MAKER() - return session - - -def synchronous_switch_listener(dbapi_conn, connection_rec): - """Switch sqlite connections to non-synchronous mode.""" - dbapi_conn.execute("PRAGMA synchronous = OFF") - - -def add_regexp_listener(dbapi_con, con_record): - """Add REGEXP function to sqlite connections.""" - - def regexp(expr, item): - reg = re.compile(expr) - return reg.search(unicode(item)) is not None - dbapi_con.create_function('regexp', 2, regexp) - - -def ping_listener(dbapi_conn, connection_rec, connection_proxy): - """ - Ensures that MySQL connections checked out of the - pool are alive. - - Borrowed from: - http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f - """ - try: - dbapi_conn.cursor().execute('select 1') - except dbapi_conn.OperationalError, ex: - if ex.args[0] in (2006, 2013, 2014, 2045, 2055): - LOG.warn('Got mysql server has gone away: %s', ex) - raise exc.DisconnectionError("Database server went away") - else: - raise - - -def is_db_connection_error(args): - """Return True if error in connecting to db.""" - # NOTE(adam_g): This is currently MySQL specific and needs to be extended - # to support Postgres and others. 
- conn_err_codes = ('2002', '2003', '2006') - for err_code in conn_err_codes: - if args.find(err_code) != -1: - return True - return False - - -def get_engine(database_connection, conf): - """Return a SQLAlchemy engine.""" - global _ENGINE - if _ENGINE is None: - connection_dict = sqlalchemy.engine.url.make_url( - database_connection) - - engine_args = { - "pool_recycle": conf.sql_idle_timeout, - "echo": False, - 'convert_unicode': True, - } - - # Map our SQL debug level to SQLAlchemy's options - if conf.sql_connection_debug >= 100: - engine_args['echo'] = 'debug' - elif conf.sql_connection_debug >= 50: - engine_args['echo'] = True - - if "sqlite" in connection_dict.drivername: - engine_args["poolclass"] = pool.NullPool - - if database_connection == "sqlite://": - engine_args["poolclass"] = pool.StaticPool - engine_args["connect_args"] = {'check_same_thread': False} - - _ENGINE = sqlalchemy.create_engine(database_connection, - **engine_args) - - if 'mysql' in connection_dict.drivername: - sqlalchemy.event.listen(_ENGINE, 'checkout', ping_listener) - elif "sqlite" in connection_dict.drivername: - if not conf.sqlite_synchronous: - sqlalchemy.event.listen(_ENGINE, 'connect', - synchronous_switch_listener) - sqlalchemy.event.listen(_ENGINE, 'connect', add_regexp_listener) - - if (conf.sql_connection_trace and - _ENGINE.dialect.dbapi.__name__ == 'MySQLdb'): - import MySQLdb.cursors - _do_query = debug_mysql_do_query() - setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query) - - try: - _ENGINE.connect() - except exc.OperationalError, e: - if not is_db_connection_error(e.args[0]): - raise - - remaining = conf.sql_max_retries - if remaining == -1: - remaining = 'infinite' - while True: - msg = 'SQL connection failed. %s attempts left.' - LOG.warn(msg % remaining) - if remaining != 'infinite': - remaining -= 1 - time.sleep(conf.sql_retry_interval) - try: - _ENGINE.connect() - break - except exc.OperationalError, e: - if (remaining != 'infinite' and remaining == 0) \ - or not is_db_connection_error(e.args[0]): - raise - return _ENGINE - - -def get_maker(engine, autocommit=True, expire_on_commit=False, autoflush=True): - """Return a SQLAlchemy sessionmaker using the given engine.""" - return sqlalchemy.orm.sessionmaker(bind=engine, - autocommit=autocommit, - autoflush=autoflush, - expire_on_commit=expire_on_commit) diff --git a/etc/ceilometer/ceilometer.conf.sample b/etc/ceilometer/ceilometer.conf.sample index 0f26fa5154..be2c421a3d 100644 --- a/etc/ceilometer/ceilometer.conf.sample +++ b/etc/ceilometer/ceilometer.conf.sample @@ -423,7 +423,7 @@ #mysql_engine=InnoDB -######## defined in ceilometer.storage.sqlalchemy.session ######## +######## defined in ceilometer.openstack.common.db.sqlalchemy.session ######## # Verbosity of SQL debugging information. 
0=None, # 100=Everything (integer value) diff --git a/openstack-common.conf b/openstack-common.conf index 2fd4ac4c8e..2b2351154b 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -1,5 +1,7 @@ [DEFAULT] module=context +module=db +module=db/sqlalchemy module=eventlet_backdoor module=excutils module=gettextutils diff --git a/tox.ini b/tox.ini index 02d9802aa7..dc4d1424d5 100644 --- a/tox.ini +++ b/tox.ini @@ -21,7 +21,7 @@ commands = nosetests --no-path-adjustment --with-coverage --cover-erase --cover- [testenv:pep8] deps = pep8==1.3.3 commands = - pep8 --repeat --ignore=E125 --show-source ceilometer setup.py bin/ceilometer-agent-central bin/ceilometer-agent-compute bin/ceilometer-collector bin/ceilometer-api tests + pep8 --repeat --ignore=E125 --exclude=ceilometer/openstack/common/db/sqlalchemy/session.py --show-source ceilometer setup.py bin/ceilometer-agent-central bin/ceilometer-agent-compute bin/ceilometer-collector bin/ceilometer-api tests [testenv:hacking] deps = pep8==1.3.3
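With this change, storage code obtains its sessions from the common module, which is configured through the global sql_connection option rather than through per-call arguments. A minimal sketch of the consuming pattern (the get_meter_by_id helper is hypothetical; Meter is the model already imported in impl_sqlalchemy.py):

    from oslo.config import cfg

    import ceilometer.openstack.common.db.sqlalchemy.session as sqlalchemy_session
    from ceilometer.storage.sqlalchemy.models import Meter

    # assumed for illustration; normally set via ceilometer.conf
    cfg.CONF.set_override('sql_connection', 'sqlite://')

    def get_meter_by_id(meter_id):
        # get_session() lazily creates the engine from CONF.sql_connection
        session = sqlalchemy_session.get_session()
        return session.query(Meter).filter(Meter.id == meter_id).first()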