
Move to oslo.db

Replace the common oslo code under nova.openstack.common.db with the
oslo.db library, and remove the incubated copy.

Replace catching of raw SQLAlchemy exceptions with catches of the
corresponding oslo.db exceptions (such as DBError, DBDuplicateEntry, etc.).
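
For illustration, a minimal sketch of the new exception-handling pattern,
using a hypothetical query helper (only the import and the except clause
mirror this change):

    from oslo.db import exception as db_exc

    def get_floating_ip(session, model, ip_id):
        # Previously this caught sqlalchemy.exc.DataError directly;
        # oslo.db wraps driver-level errors such as DataError into
        # DBError, so callers catch the library exception instead.
        try:
            return session.query(model).filter_by(id=ip_id).first()
        except db_exc.DBError:
            raise ValueError('Invalid floating ip id %s in request' % ip_id)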

Co-Authored-By: Eugeniya Kudryashova <ekudryashova@mirantis.com>

Closes-Bug: #1364986
Closes-Bug: #1353131
Closes-Bug: #1283987
Closes-Bug: #1274523
Change-Id: I0649539e071b2318ec85ed5d70259c949408e64b
Tag: 2014.2.rc1
Author: Andrey Kurilin
Commit: 1b83b2f110
40 changed files with 107 additions and 3112 deletions
1. HACKING.rst (+1, -1)
2. nova/cells/state.py (+1, -1)
3. nova/cmd/manage.py (+1, -1)
4. nova/compute/flavors.py (+1, -1)
5. nova/config.py (+10, -7)
6. nova/db/api.py (+2, -41)
7. nova/db/sqlalchemy/api.py (+26, -48)
8. nova/db/sqlalchemy/migrate_repo/versions/230_add_details_column_to_instance_actions_events.py (+1, -1)
9. nova/db/sqlalchemy/models.py (+12, -2)
10. nova/db/sqlalchemy/utils.py (+6, -3)
11. nova/openstack/common/db/__init__.py (+0, -0)
12. nova/openstack/common/db/api.py (+0, -162)
13. nova/openstack/common/db/exception.py (+0, -56)
14. nova/openstack/common/db/options.py (+0, -171)
15. nova/openstack/common/db/sqlalchemy/__init__.py (+0, -0)
16. nova/openstack/common/db/sqlalchemy/migration.py (+0, -278)
17. nova/openstack/common/db/sqlalchemy/models.py (+0, -119)
18. nova/openstack/common/db/sqlalchemy/provision.py (+0, -157)
19. nova/openstack/common/db/sqlalchemy/session.py (+0, -904)
20. nova/openstack/common/db/sqlalchemy/test_base.py (+0, -167)
21. nova/openstack/common/db/sqlalchemy/test_migrations.py (+0, -269)
22. nova/openstack/common/db/sqlalchemy/utils.py (+0, -655)
23. nova/test.py (+0, -5)
24. nova/tests/cells/test_cells_state_manager.py (+1, -1)
25. nova/tests/compute/test_compute.py (+11, -0)
26. nova/tests/conf_fixture.py (+2, -2)
27. nova/tests/db/test_db_api.py (+7, -39)
28. nova/tests/db/test_migration_utils.py (+1, -1)
29. nova/tests/db/test_migrations.py (+6, -5)
30. nova/tests/network/test_manager.py (+1, -1)
31. nova/tests/virt/baremetal/db/test_bm_interface.py (+2, -1)
32. nova/tests/virt/baremetal/test_pxe.py (+1, -1)
33. nova/tests/virt/baremetal/test_tilera.py (+1, -1)
34. nova/virt/baremetal/db/sqlalchemy/api.py (+1, -1)
35. nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/006_move_prov_mac_address.py (+6, -3)
36. nova/virt/baremetal/db/sqlalchemy/session.py (+3, -3)
37. nova/virt/baremetal/pxe.py (+1, -1)
38. nova/virt/baremetal/tilera.py (+1, -1)
39. openstack-common.conf (+0, -2)
40. requirements.txt (+1, -0)

HACKING.rst (+1, -1)

@@ -10,7 +10,7 @@ Nova Specific Commandments

- ``nova.db`` imports are not allowed in ``nova/virt/*``
- [N309] no db session in public API methods (disabled)
This enforces a guideline defined in ``nova.openstack.common.db.sqlalchemy.session``
This enforces a guideline defined in ``oslo.db.sqlalchemy.session``
- [N310] timeutils.utcnow() wrapper must be used instead of direct calls to
datetime.datetime.utcnow() to make it easy to override its return value in tests
- [N311] importing code from other virt drivers forbidden

nova/cells/state.py (+1, -1)

@@ -22,13 +22,13 @@ import functools
import time

from oslo.config import cfg
from oslo.db import exception as db_exc

from nova.cells import rpc_driver
from nova import context
from nova.db import base
from nova import exception
from nova.i18n import _
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import fileutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging

nova/cmd/manage.py (+1, -1)

@@ -61,6 +61,7 @@ import sys
import decorator
import netaddr
from oslo.config import cfg
from oslo.db import exception as db_exc
from oslo import messaging
import six

@@ -75,7 +76,6 @@ from nova import exception
from nova.i18n import _
from nova import objects
from nova.openstack.common import cliutils
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import quota

nova/compute/flavors.py (+1, -1)

@@ -22,6 +22,7 @@ import re
import uuid

from oslo.config import cfg
from oslo.db import exception as db_exc
import six

from nova import context
@@ -29,7 +30,6 @@ from nova import db
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.pci import pci_request

nova/config.py (+10, -7)

@@ -16,23 +16,26 @@
# under the License.

from oslo.config import cfg
from oslo.db import options

from nova import debugger
from nova.openstack.common.db import options
from nova import paths
from nova import rpc
from nova import version


CONF = cfg.CONF

_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('nova.sqlite')


def parse_args(argv, default_config_files=None):
options.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION,
sqlite_db='nova.sqlite')
rpc.set_defaults(control_exchange='nova')
debugger.register_cli_opts()
cfg.CONF(argv[1:],
project='nova',
version=version.version_string(),
default_config_files=default_config_files)
rpc.init(cfg.CONF)
CONF(argv[1:],
project='nova',
version=version.version_string(),
default_config_files=default_config_files)
rpc.init(CONF)
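
A minimal usage sketch of the new oslo.db defaults call (values here are
illustrative): oslo.db takes the ConfigOpts object explicitly, and the
option keyword changed from sql_connection to connection.

    from oslo.config import cfg
    from oslo.db import options

    CONF = cfg.CONF
    # Registers defaults for [database]/connection and sqlite_db.
    options.set_defaults(CONF,
                         connection='sqlite:///nova.sqlite',
                         sqlite_db='nova.sqlite')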

nova/db/api.py (+2, -41)

@@ -27,12 +27,11 @@ these objects be simple dictionaries.

"""

from eventlet import tpool
from oslo.config import cfg
from oslo.db import concurrency

from nova.cells import rpcapi as cells_rpcapi
from nova.i18n import _
from nova.openstack.common.db import api as db_api
from nova.openstack.common import log as logging


@@ -48,51 +47,13 @@ db_opts = [
help='Template string to be used to generate snapshot names'),
]

tpool_opts = [
cfg.BoolOpt('use_tpool',
default=False,
deprecated_name='dbapi_use_tpool',
deprecated_group='DEFAULT',
help='Enable the experimental use of thread pooling for '
'all DB API calls'),
]

CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.register_opts(tpool_opts, 'database')
CONF.import_opt('backend', 'nova.openstack.common.db.options',
group='database')

_BACKEND_MAPPING = {'sqlalchemy': 'nova.db.sqlalchemy.api'}


class NovaDBAPI(object):
"""Nova's DB API wrapper class.

This wraps the oslo DB API with an option to be able to use eventlet's
thread pooling. Since the CONF variable may not be loaded at the time
this class is instantiated, we must look at it on the first DB API call.
"""

def __init__(self):
self.__db_api = None

@property
def _db_api(self):
if not self.__db_api:
nova_db_api = db_api.DBAPI(CONF.database.backend,
backend_mapping=_BACKEND_MAPPING)
if CONF.database.use_tpool:
self.__db_api = tpool.Proxy(nova_db_api)
else:
self.__db_api = nova_db_api
return self.__db_api

def __getattr__(self, key):
return getattr(self._db_api, key)


IMPL = NovaDBAPI()
IMPL = concurrency.TpoolDbapiWrapper(CONF, backend_mapping=_BACKEND_MAPPING)

LOG = logging.getLogger(__name__)
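
A short usage sketch of the replacement, assuming only that oslo.db's
wrapper mirrors the removed NovaDBAPI behaviour:

    from oslo.config import cfg
    from oslo.db import concurrency

    CONF = cfg.CONF
    _BACKEND_MAPPING = {'sqlalchemy': 'nova.db.sqlalchemy.api'}

    # TpoolDbapiWrapper loads the backend lazily on first use and, when
    # [database]/use_tpool is enabled, proxies every DB API call through
    # eventlet's tpool, as the removed NovaDBAPI class did by hand.
    IMPL = concurrency.TpoolDbapiWrapper(CONF,
                                         backend_mapping=_BACKEND_MAPPING)

    # Attribute access is forwarded to the loaded backend, e.g.
    # IMPL.instance_get(context, instance_id).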


nova/db/sqlalchemy/api.py (+26, -48)

@@ -22,15 +22,17 @@ import copy
import datetime
import functools
import sys
import threading
import time
import uuid

from oslo.config import cfg
from oslo.db import exception as db_exc
from oslo.db.sqlalchemy import session as db_session
from oslo.db.sqlalchemy import utils as sqlalchemyutils
import six
from sqlalchemy import and_
from sqlalchemy import Boolean
from sqlalchemy.exc import DataError
from sqlalchemy.exc import IntegrityError
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import Integer
from sqlalchemy import MetaData
@@ -56,9 +58,6 @@ import nova.context
from nova.db.sqlalchemy import models
from nova import exception
from nova.i18n import _
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common.db.sqlalchemy import utils as sqlalchemyutils
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -73,57 +72,34 @@ db_opts = [
'Should be empty, "project" or "global".'),
]

connection_opts = [
cfg.StrOpt('slave_connection',
secret=True,
help='The SQLAlchemy connection string used to connect to the '
'slave database'),
]

CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.register_opts(connection_opts, group='database')
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('connection',
'nova.openstack.common.db.options',
group='database')

LOG = logging.getLogger(__name__)


_MASTER_FACADE = None
_SLAVE_FACADE = None

_ENGINE_FACADE = None
_LOCK = threading.Lock()

def _create_facade_lazily(use_slave=False):
global _MASTER_FACADE
global _SLAVE_FACADE

return_slave = use_slave and CONF.database.slave_connection
if not return_slave:
if _MASTER_FACADE is None:
_MASTER_FACADE = db_session.EngineFacade(
CONF.database.connection,
**dict(CONF.database.iteritems())
)
return _MASTER_FACADE
else:
if _SLAVE_FACADE is None:
_SLAVE_FACADE = db_session.EngineFacade(
CONF.database.slave_connection,
**dict(CONF.database.iteritems())
)
return _SLAVE_FACADE
def _create_facade_lazily():
global _LOCK, _ENGINE_FACADE
if _ENGINE_FACADE is None:
with _LOCK:
if _ENGINE_FACADE is None:
_ENGINE_FACADE = db_session.EngineFacade.from_config(CONF)
return _ENGINE_FACADE


def get_engine(use_slave=False):
facade = _create_facade_lazily(use_slave)
return facade.get_engine()
facade = _create_facade_lazily()
return facade.get_engine(use_slave=use_slave)


def get_session(use_slave=False, **kwargs):
facade = _create_facade_lazily(use_slave)
return facade.get_session(**kwargs)
facade = _create_facade_lazily()
return facade.get_session(use_slave=use_slave, **kwargs)


_SHADOW_TABLE_PREFIX = 'shadow_'
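
# Usage sketch: a single facade now serves both databases, selected per
# call (assumes [database]/slave_connection is configured; otherwise
# oslo.db falls back to the master engine):
#
#     engine = get_engine(use_slave=True)
#     session = get_session(use_slave=True)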
@@ -749,7 +725,7 @@ def floating_ip_get(context, id):

if not result:
raise exception.FloatingIpNotFound(id=id)
except DataError:
except db_exc.DBError:
msg = _("Invalid floating ip id %s in request") % id
LOG.warn(msg)
raise exception.InvalidID(id=id)
@@ -1003,7 +979,7 @@ def _floating_ip_get_by_address(context, address, session=None):

if not result:
raise exception.FloatingIpNotFoundForAddress(address=address)
except DataError:
except db_exc.DBError:
msg = _("Invalid floating IP %s in request") % address
LOG.warn(msg)
raise exception.InvalidIpAddressError(msg)
@@ -1306,7 +1282,7 @@ def _fixed_ip_get_by_address(context, address, session=None,
result = result.filter_by(address=address).first()
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)
except DataError:
except db_exc.DBError:
msg = _("Invalid fixed IP Address %s in request") % address
LOG.warn(msg)
raise exception.FixedIpInvalid(msg)
@@ -1345,7 +1321,7 @@ def fixed_ip_get_by_address_detailed(context, address):
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)

except DataError:
except db_exc.DBError:
msg = _("Invalid fixed IP Address %s in request") % address
LOG.warn(msg)
raise exception.FixedIpInvalid(msg)
@@ -1480,7 +1456,7 @@ def virtual_interface_get_by_address(context, address):
vif_ref = _virtual_interface_query(context).\
filter_by(address=address).\
first()
except DataError:
except db_exc.DBError:
msg = _("Invalid virtual interface address %s in request") % address
LOG.warn(msg)
raise exception.InvalidIpAddressError(msg)
@@ -1735,7 +1711,7 @@ def instance_get(context, instance_id, columns_to_join=None):
raise exception.InstanceNotFound(instance_id=instance_id)

return result
except DataError:
except db_exc.DBError:
# NOTE(sdague): catch all in case the db engine chokes on the
# id because it's too long of an int to store.
msg = _("Invalid instance id %s in request") % instance_id
@@ -5729,7 +5705,9 @@ def archive_deleted_rows_for_table(context, tablename, max_rows):
with conn.begin():
conn.execute(insert_statement)
result_delete = conn.execute(delete_statement)
except IntegrityError:
except db_exc.DBError:
# TODO(ekudryashova): replace by DBReferenceError when the db layer
# raises it.
# A foreign key constraint keeps us from deleting some of
# these rows until we clean up a dependent table. Just
# skip this table for now; we'll come back to it later.
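
A hypothetical sketch of the follow-up described in the TODO, once oslo.db
raises DBReferenceError for foreign key violations:

    try:
        with conn.begin():
            conn.execute(insert_statement)
            result_delete = conn.execute(delete_statement)
    except db_exc.DBReferenceError:
        # Dependent rows still reference this table; skip it and come
        # back after the dependent table has been archived.
        pass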

nova/db/sqlalchemy/migrate_repo/versions/230_add_details_column_to_instance_actions_events.py (+1, -1)

@@ -12,10 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.

from oslo.db.sqlalchemy import utils
from sqlalchemy import Column, String, Text

from nova.db.sqlalchemy import api
from nova.openstack.common.db.sqlalchemy import utils


def upgrade(migrate_engine):

nova/db/sqlalchemy/models.py (+12, -2)

@@ -19,15 +19,15 @@
SQLAlchemy models for nova data.
"""

from oslo.config import cfg
from oslo.db.sqlalchemy import models
from sqlalchemy import Column, Index, Integer, BigInteger, Enum, String, schema
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import orm
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from oslo.config import cfg

from nova.db.sqlalchemy import types
from nova.openstack.common.db.sqlalchemy import models
from nova.openstack.common import timeutils

CONF = cfg.CONF
@@ -43,6 +43,16 @@ class NovaBase(models.SoftDeleteMixin,
models.ModelBase):
metadata = None

# TODO(ekudryashova): remove this once both nova and oslo.db
# use the oslo.utils library.
# NOTE: Both projects (nova and oslo.db) use `timeutils.utcnow`, which
# returns the specified time if override_time is set. Time overriding is
# only used by unit tests, but temporarily overriding these columns here
# avoids lots of calls to timeutils.set_override from different places
# in unit tests.
created_at = Column(DateTime, default=lambda: timeutils.utcnow())
updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())

def save(self, session=None):
from nova.db.sqlalchemy import api
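
As an illustrative sketch (hypothetical model), subclassing the oslo.db
mixins gives a model the deleted/deleted_at columns plus save() and
dict-style access:

    from oslo.db.sqlalchemy import models
    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    BASE = declarative_base()

    class Widget(BASE, models.SoftDeleteMixin, models.ModelBase):
        # deleted/deleted_at come from SoftDeleteMixin; save() and
        # dict-style access come from ModelBase. created_at/updated_at
        # would come from the NovaBase override shown above.
        __tablename__ = 'widgets'
        id = Column(Integer, primary_key=True)
        name = Column(String(255))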


nova/db/sqlalchemy/utils.py (+6, -3)

@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.

from oslo.db import exception as db_exc
from oslo.db.sqlalchemy import utils as oslodbutils
from sqlalchemy.exc import OperationalError
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import MetaData
from sqlalchemy.sql.expression import UpdateBase
@@ -24,7 +25,6 @@ from sqlalchemy.types import NullType
from nova.db.sqlalchemy import api as db
from nova import exception
from nova.i18n import _
from nova.openstack.common.db.sqlalchemy import utils as oslodbutils
from nova.openstack.common import log as logging


@@ -124,7 +124,10 @@ def create_shadow_table(migrate_engine, table_name=None, table=None,
try:
shadow_table.create()
return shadow_table
except (OperationalError, ProgrammingError):
except (db_exc.DBError, OperationalError):
# NOTE(ekudryashova): At the moment there is a case in oslo.db code
# which raises an unwrapped OperationalError, so we should catch it
# until oslo.db wraps all such exceptions.
LOG.info(repr(shadow_table))
LOG.exception(_('Exception while creating table.'))
raise exception.ShadowTableExists(name=shadow_table_name)

nova/openstack/common/db/__init__.py (+0, -0)


nova/openstack/common/db/api.py (+0, -162)

@@ -1,162 +0,0 @@
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Multiple DB API backend support.

A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.
"""

import functools
import logging
import threading
import time

from nova.openstack.common.db import exception
from nova.openstack.common.gettextutils import _LE
from nova.openstack.common import importutils


LOG = logging.getLogger(__name__)


def safe_for_db_retry(f):
"""Enable db-retry for decorated function, if config option enabled."""
f.__dict__['enable_retry'] = True
return f


class wrap_db_retry(object):
"""Retry db.api methods, if DBConnectionError() raised

Retry decorated db.api methods. If we enabled `use_db_reconnect`
in config, this decorator will be applied to all db.api functions,
marked with @safe_for_db_retry decorator.
Decorator catchs DBConnectionError() and retries function in a
loop until it succeeds, or until maximum retries count will be reached.
"""

def __init__(self, retry_interval, max_retries, inc_retry_interval,
max_retry_interval):
super(wrap_db_retry, self).__init__()

self.retry_interval = retry_interval
self.max_retries = max_retries
self.inc_retry_interval = inc_retry_interval
self.max_retry_interval = max_retry_interval

def __call__(self, f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
next_interval = self.retry_interval
remaining = self.max_retries

while True:
try:
return f(*args, **kwargs)
except exception.DBConnectionError as e:
if remaining == 0:
LOG.exception(_LE('DB exceeded retry limit.'))
raise exception.DBError(e)
if remaining != -1:
remaining -= 1
LOG.exception(_LE('DB connection error.'))
# NOTE(vsergeyev): We are using patched time module, so
# this effectively yields the execution
# context to another green thread.
time.sleep(next_interval)
if self.inc_retry_interval:
next_interval = min(
next_interval * 2,
self.max_retry_interval
)
return wrapper
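
# For context, a usage sketch of the removed helpers above, with a
# hypothetical backend method:
#
#     @safe_for_db_retry
#     def instance_get(context, instance_id):
#         ...
#
#     # With use_db_reconnect enabled, DBAPI.__getattr__ (below) wraps
#     # any backend method carrying the enable_retry flag:
#     retried = wrap_db_retry(retry_interval=1, max_retries=20,
#                             inc_retry_interval=True,
#                             max_retry_interval=10)(instance_get)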


class DBAPI(object):
def __init__(self, backend_name, backend_mapping=None, lazy=False,
**kwargs):
"""Initialize the chosen DB API backend.

:param backend_name: name of the backend to load
:type backend_name: str

:param backend_mapping: backend name -> module/class to load mapping
:type backend_mapping: dict

:param lazy: load the DB backend lazily on the first DB API method call
:type lazy: bool

Keyword arguments:

:keyword use_db_reconnect: retry DB transactions on disconnect or not
:type use_db_reconnect: bool

:keyword retry_interval: seconds between transaction retries
:type retry_interval: int

:keyword inc_retry_interval: increase retry interval or not
:type inc_retry_interval: bool

:keyword max_retry_interval: max interval value between retries
:type max_retry_interval: int

:keyword max_retries: max number of retries before an error is raised
:type max_retries: int

"""

self._backend = None
self._backend_name = backend_name
self._backend_mapping = backend_mapping or {}
self._lock = threading.Lock()

if not lazy:
self._load_backend()

self.use_db_reconnect = kwargs.get('use_db_reconnect', False)
self.retry_interval = kwargs.get('retry_interval', 1)
self.inc_retry_interval = kwargs.get('inc_retry_interval', True)
self.max_retry_interval = kwargs.get('max_retry_interval', 10)
self.max_retries = kwargs.get('max_retries', 20)

def _load_backend(self):
with self._lock:
if not self._backend:
# Import the untranslated name if we don't have a mapping
backend_path = self._backend_mapping.get(self._backend_name,
self._backend_name)
backend_mod = importutils.import_module(backend_path)
self._backend = backend_mod.get_backend()

def __getattr__(self, key):
if not self._backend:
self._load_backend()

attr = getattr(self._backend, key)
if not hasattr(attr, '__call__'):
return attr
# NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
# DB API methods, decorated with @safe_for_db_retry
# on disconnect.
if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
attr = wrap_db_retry(
retry_interval=self.retry_interval,
max_retries=self.max_retries,
inc_retry_interval=self.inc_retry_interval,
max_retry_interval=self.max_retry_interval)(attr)

return attr

nova/openstack/common/db/exception.py (+0, -56)

@@ -1,56 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""DB related custom exceptions."""

import six

from nova.openstack.common.gettextutils import _


class DBError(Exception):
"""Wraps an implementation specific exception."""
def __init__(self, inner_exception=None):
self.inner_exception = inner_exception
super(DBError, self).__init__(six.text_type(inner_exception))


class DBDuplicateEntry(DBError):
"""Wraps an implementation specific exception."""
def __init__(self, columns=[], inner_exception=None):
self.columns = columns
super(DBDuplicateEntry, self).__init__(inner_exception)


class DBDeadlock(DBError):
def __init__(self, inner_exception=None):
super(DBDeadlock, self).__init__(inner_exception)


class DBInvalidUnicodeParameter(Exception):
message = _("Invalid Parameter: "
"Unicode is not supported by the current database.")


class DbMigrationError(DBError):
"""Wraps migration specific exception."""
def __init__(self, message=None):
super(DbMigrationError, self).__init__(message)


class DBConnectionError(DBError):
"""Wraps connection specific exception."""
pass

nova/openstack/common/db/options.py (+0, -171)

@@ -1,171 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

from oslo.config import cfg


database_opts = [
cfg.StrOpt('sqlite_db',
deprecated_group='DEFAULT',
default='nova.sqlite',
help='The file name to use with SQLite'),
cfg.BoolOpt('sqlite_synchronous',
deprecated_group='DEFAULT',
default=True,
help='If True, SQLite uses synchronous mode'),
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The backend to use for db'),
cfg.StrOpt('connection',
help='The SQLAlchemy connection string used to connect to the '
'database',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('mysql_sql_mode',
default='TRADITIONAL',
help='The SQL mode to be used for MySQL sessions. '
'This option, including the default, overrides any '
'server-set SQL mode. To use whatever SQL mode '
'is set by the server configuration, '
'set this to no value. Example: mysql_sql_mode='),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='Timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='Maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='Interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
cfg.BoolOpt('use_db_reconnect',
default=False,
help='Enable the experimental use of database reconnect '
'on connection lost'),
cfg.IntOpt('db_retry_interval',
default=1,
help='seconds between db connection retries'),
cfg.BoolOpt('db_inc_retry_interval',
default=True,
help='Whether to increase interval between db connection '
'retries, up to db_max_retry_interval'),
cfg.IntOpt('db_max_retry_interval',
default=10,
help='max seconds between db connection retries, if '
'db_inc_retry_interval is enabled'),
cfg.IntOpt('db_max_retries',
default=20,
help='maximum db connection retries before error is raised. '
'(setting -1 implies an infinite retry count)'),
]

CONF = cfg.CONF
CONF.register_opts(database_opts, 'database')


def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)


def list_opts():
"""Returns a list of oslo.config options available in the library.

The returned list includes all oslo.config options which may be registered
at runtime by the library.

Each element of the list is a tuple. The first element is the name of the
group under which the list of elements in the second element will be
registered. A group name of None corresponds to the [DEFAULT] group in
config files.

The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users by this library.

:returns: a list of (group_name, opts) tuples
"""
return [('database', copy.deepcopy(database_opts))]

nova/openstack/common/db/sqlalchemy/__init__.py (+0, -0)


nova/openstack/common/db/sqlalchemy/migration.py (+0, -278)

@@ -1,278 +0,0 @@
# coding: utf-8
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Base on code in migrate/changeset/databases/sqlite.py which is under
# the following license:
#
# The MIT License
#
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import os
import re

from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint

from nova.openstack.common.db import exception
from nova.openstack.common.gettextutils import _


def _get_unique_constraints(self, table):
"""Retrieve information about existing unique constraints of the table

This feature is needed for _recreate_table() to work properly.
Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.

"""

data = table.metadata.bind.execute(
"""SELECT sql
FROM sqlite_master
WHERE
type='table' AND
name=:table_name""",
table_name=table.name
).fetchone()[0]

UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
return [
UniqueConstraint(
*[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
name=name
)
for name, cols in re.findall(UNIQUE_PATTERN, data)
]


def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
"""Recreate the table properly

Unlike the corresponding original method of sqlalchemy-migrate this one
doesn't drop existing unique constraints when creating a new one.

"""

table_name = self.preparer.format_table(table)

# we remove all indexes so as not to have
# problems during copy and re-create
for index in table.indexes:
index.drop()

# reflect existing unique constraints
for uc in self._get_unique_constraints(table):
table.append_constraint(uc)
# omit given unique constraints when creating a new table if required
table.constraints = set([
cons for cons in table.constraints
if omit_uniques is None or cons.name not in omit_uniques
])

self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
self.execute()

insertion_string = self._modify_table(table, column, delta)

table.create(bind=self.connection)
self.append(insertion_string % {'table_name': table_name})
self.execute()
self.append('DROP TABLE migration_tmp')
self.execute()


def _visit_migrate_unique_constraint(self, *p, **k):
"""Drop the given unique constraint

The corresponding original method of sqlalchemy-migrate just
raises NotImplemented error

"""

self.recreate_table(p[0].table, omit_uniques=[p[0].name])


def patch_migrate():
"""A workaround for SQLite's inability to alter things

SQLite abilities to alter tables are very limited (please read
http://www.sqlite.org/lang_altertable.html for more details).
E. g. one can't drop a column or a constraint in SQLite. The
workaround for this is to recreate the original table omitting
the corresponding constraint (or column).

sqlalchemy-migrate library has recreate_table() method that
implements this workaround, but it does it wrong:

- information about unique constraints of a table
is not retrieved. So if you have a table with one
unique constraint and a migration adding another one
you will end up with a table that has only the
latter unique constraint, and the former will be lost

- dropping of unique constraints is not supported at all

The proper way to fix this is to provide a pull-request to
sqlalchemy-migrate, but the project seems to be dead. So we
can go on with monkey-patching of the lib at least for now.

"""

# this patch is needed to ensure that recreate_table() doesn't drop
# existing unique constraints of the table when creating a new one
helper_cls = sqlite.SQLiteHelper
helper_cls.recreate_table = _recreate_table
helper_cls._get_unique_constraints = _get_unique_constraints

# this patch is needed to be able to drop existing unique constraints
constraint_cls = sqlite.SQLiteConstraintDropper
constraint_cls.visit_migrate_unique_constraint = \
_visit_migrate_unique_constraint
constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
sqlite.SQLiteConstraintGenerator)
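
# Usage sketch (illustrative path): the patch is applied once, before
# running migration scripts against SQLite:
#
#     import sqlalchemy
#     engine = sqlalchemy.create_engine('sqlite:///nova.sqlite')
#     patch_migrate()
#     db_sync(engine, abs_path='/path/to/migrate_repo')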


def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True):
"""Upgrade or downgrade a database.

Function runs the upgrade() or downgrade() functions in change scripts.

:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository.
:param version: Database will upgrade/downgrade until this version.
If None - database will update to the latest
available version.
:param init_version: Initial database version
:param sanity_check: Require schema sanity checking for all tables
"""

if version is not None:
try:
version = int(version)
except ValueError:
raise exception.DbMigrationError(
message=_("version should be an integer"))

current_version = db_version(engine, abs_path, init_version)
repository = _find_migrate_repo(abs_path)
if sanity_check:
_db_schema_sanity_check(engine)
if version is None or version > current_version:
return versioning_api.upgrade(engine, repository, version)
else:
return versioning_api.downgrade(engine, repository,
version)


def _db_schema_sanity_check(engine):
"""Ensure all database tables were created with required parameters.

:param engine: SQLAlchemy engine instance for a given database

"""

if engine.name == 'mysql':
onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
'from information_schema.TABLES '
'where TABLE_SCHEMA=%s and '
'TABLE_COLLATION NOT LIKE "%%utf8%%"')

# NOTE(morganfainberg): exclude the sqlalchemy-migrate and alembic
# versioning tables from the tables we need to verify utf8 status on.
# Non-standard table names are not supported.
EXCLUDED_TABLES = ['migrate_version', 'alembic_version']

table_names = [res[0] for res in
engine.execute(onlyutf8_sql, engine.url.database) if
res[0].lower() not in EXCLUDED_TABLES]

if len(table_names) > 0:
raise ValueError(_('Tables "%s" have non utf8 collation, '
'please make sure all tables are CHARSET=utf8'
) % ','.join(table_names))


def db_version(engine, abs_path, init_version):
"""Show the current version of the repository.

:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
try:
return versioning_api.db_version(engine, repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0 or 'alembic_version' in tables:
db_version_control(engine, abs_path, version=init_version)
return versioning_api.db_version(engine, repository)
else:
raise exception.DbMigrationError(
message=_(
"The database is not under version control, but has "
"tables. Please stamp the current version of the schema "
"manually."))


def db_version_control(engine, abs_path, version=None):
"""Mark a database as under this repository's version control.

Once a database is under version control, schema changes should
only be done via change scripts in this repository.

:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
versioning_api.version_control(engine, repository, version)
return version


def _find_migrate_repo(abs_path):
"""Get the project's change script repository

:param abs_path: Absolute path to migrate repository
"""
if not os.path.exists(abs_path):
raise exception.DbMigrationError("Path %s not found" % abs_path)
return Repository(abs_path)

nova/openstack/common/db/sqlalchemy/models.py (+0, -119)

@@ -1,119 +0,0 @@
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""

import six

from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper

from nova.openstack.common import timeutils


class ModelBase(six.Iterator):
"""Base class for models."""
__table_initialized__ = False

def save(self, session):
"""Save this object."""

# NOTE(boris-42): This part of the code should look like:
# session.add(self)
# session.flush()
# But there is a bug in sqlalchemy and eventlet that
# raises NoneType exception if there is no running
# transaction and rollback is called. As long as
# sqlalchemy has this bug we have to create transaction
# explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()

def __setitem__(self, key, value):
setattr(self, key, value)

def __getitem__(self, key):
return getattr(self, key)

def get(self, key, default=None):
return getattr(self, key, default)

@property
def _extra_keys(self):
"""Specifies custom fields

Subclasses can override this property to return a list
of custom fields that should be included in their dict
representation.

For reference check tests/db/sqlalchemy/test_models.py
"""
return []

def __iter__(self):
columns = list(dict(object_mapper(self).columns).keys())
# NOTE(russellb): Allow models to specify other keys that can be looked
# up, beyond the actual db columns. An example would be the 'name'
# property for an Instance.
columns.extend(self._extra_keys)
self._i = iter(columns)
return self

# In Python 3, __next__() has replaced next().
def __next__(self):
n = six.advance_iterator(self._i)
return n, getattr(self, n)

def next(self):
return self.__next__()

def update(self, values):
"""Make the model object behave like a dict."""
for k, v in six.iteritems(values):
setattr(self, k, v)

def iteritems(self):
"""Make the model object behave like a dict.

Includes attributes from joins.
"""
local = dict(self)
joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
if not k[0] == '_'])
local.update(joined)
return six.iteritems(local)


class TimestampMixin(object):
created_at = Column(DateTime, default=lambda: timeutils.utcnow())
updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())


class SoftDeleteMixin(object):
deleted_at = Column(DateTime)
deleted = Column(Integer, default=0)

def soft_delete(self, session):
"""Mark this object as deleted."""
self.deleted = self.id
self.deleted_at = timeutils.utcnow()
self.save(session=session)

nova/openstack/common/db/sqlalchemy/provision.py (+0, -157)

@@ -1,157 +0,0 @@
# Copyright 2013 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Provision test environment for specific DB backends"""

import argparse
import logging
import os
import random
import string

from six import moves
import sqlalchemy

from nova.openstack.common.db import exception as exc


LOG = logging.getLogger(__name__)


def get_engine(uri):
"""Engine creation

Call the function without arguments to get admin connection. Admin
connection required to create temporary user and database for each
particular test. Otherwise use existing connection to recreate connection
to the temporary database.
"""
return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool)


def _execute_sql(engine, sql, driver):
"""Initialize connection, execute sql query and close it."""
try:
with engine.connect() as conn:
if driver == 'postgresql':
conn.connection.set_isolation_level(0)
for s in sql:
conn.execute(s)
except sqlalchemy.exc.OperationalError:
msg = ('%s does not match database admin '
'credentials or database does not exist.')
LOG.exception(msg % engine.url)
raise exc.DBConnectionError(msg % engine.url)


def create_database(engine):
"""Provide temporary user and database for each particular test."""
driver = engine.name

auth = {
'database': ''.join(random.choice(string.ascii_lowercase)
for i in moves.range(10)),
'user': engine.url.username,
'passwd': engine.url.password,
}

sqls = [
"drop database if exists %(database)s;",
"create database %(database)s;"
]

if driver == 'sqlite':
return 'sqlite:////tmp/%s' % auth['database']
elif driver in ['mysql', 'postgresql']:
sql_query = map(lambda x: x % auth, sqls)
_execute_sql(engine, sql_query, driver)
else:
raise ValueError('Unsupported RDBMS %s' % driver)

params = auth.copy()
params['backend'] = driver
return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params


def drop_database(admin_engine, current_uri):
"""Drop temporary database and user after each particular test."""

engine = get_engine(current_uri)
driver = engine.name
auth = {'database': engine.url.database, 'user': engine.url.username}

if driver == 'sqlite':
try:
os.remove(auth['database'])
except OSError:
pass
elif driver in ['mysql', 'postgresql']:
sql = "drop database if exists %(database)s;"
_execute_sql(admin_engine, [sql % auth], driver)
else:
raise ValueError('Unsupported RDBMS %s' % driver)


def main():
"""Controller to handle commands

::create: Create test user and database with random names.
::drop: Drop user and database created by previous command.
"""
parser = argparse.ArgumentParser(
description='Controller to handle database creation and dropping'
' commands.',
epilog='Under normal circumstances it is not used directly.'
' Used in .testr.conf to automate test database creation'
' and dropping processes.')
subparsers = parser.add_subparsers(
help='Subcommands to manipulate temporary test databases.')

create = subparsers.add_parser(
'create',
help='Create temporary test '
'databases and users.')
create.set_defaults(which='create')
create.add_argument(
'instances_count',
type=int,
help='Number of databases to create.')

drop = subparsers.add_parser(
'drop',
help='Drop temporary test databases and users.')
drop.set_defaults(which='drop')
drop.add_argument(
'instances',
nargs='+',
help='List of databases uri to be dropped.')

args = parser.parse_args()

connection_string = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION',
'sqlite://')
engine = get_engine(connection_string)
which = args.which

if which == "create":
for i in range(int(args.instances_count)):
print(create_database(engine))
elif which == "drop":
for db in args.instances:
drop_database(engine, db)


if __name__ == "__main__":
main()

nova/openstack/common/db/sqlalchemy/session.py (+0, -904)

@@ -1,904 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Session Handling for SQLAlchemy backend.

Recommended ways to use sessions within this framework:

* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
`model_query()` will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.

.. note:: Automatic retry will be enabled in a future patch.

It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and `reservation_rollback()`.

Examples:

.. code:: python

def get_foo(context, foo):
return (model_query(context, models.Foo).
filter_by(foo=foo).
first())

def update_foo(context, id, newfoo):
(model_query(context, models.Foo).
filter_by(id=id).
update({'foo': newfoo}))

def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref


* Within the scope of a single method, keep all the reads and writes within
the context managed by a single session. In this way, the session's
`__exit__` handler will take care of calling `flush()` and `commit()` for
you. If using this approach, you should not explicitly call `flush()` or
`commit()`. Any error within the context of the session will cause the
session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
raised in `session`'s `__exit__` handler, and any try/except within the
context managed by `session` will not be triggered. And catching other
non-database errors in the session will not trigger the ROLLBACK, so
exception handlers should always be outside the session, unless the
developer wants to do a partial commit on purpose. If the connection is
dropped before this is possible, the database will implicitly roll back the
transaction.

.. note:: Statements in the session scope will not be automatically retried.

If you create models within the session, they need to be added, but you
do not need to call `model.save()`:

.. code:: python

def create_many_foo(context, foos):
session = sessionmaker()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)

def update_bar(context, foo_id, newbar):
session = sessionmaker()
with session.begin():
foo_ref = (model_query(context, models.Foo, session).
filter_by(id=foo_id).
first())
(model_query(context, models.Bar, session).
filter_by(id=foo_ref['bar_id']).
update({'bar': newbar}))

.. note:: `update_bar` is a trivially simple example of using
``with session.begin``. Whereas `create_many_foo` is a good example of
when a transaction is needed, it is always best to use as few queries as
possible.

The two queries in `update_bar` can be better expressed using a single query
which avoids the need for an explicit transaction. It can be expressed like
so:

.. code:: python

def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id).
filter_by(id=foo_id).
limit(1).
subquery())
(model_query(context, models.Bar).
filter_by(id=subq.as_scalar()).
update({'bar': newbar}))

For reference, this emits approximately the following SQL statement:

.. code:: sql

UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);

.. note:: `create_duplicate_foo` is a trivially simple example of catching an
exception while using ``with session.begin``. Here we create two duplicate
instances with the same primary key; the exception must be caught outside
of the context managed by a single session:

.. code:: python

def create_duplicate_foo(context):
foo1 = models.Foo()
foo2 = models.Foo()
foo1.id = foo2.id = 1
session = sessionmaker()
try:
with session.begin():
session.add(foo1)
session.add(foo2)
except exception.DBDuplicateEntry as e:
handle_error(e)

* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call `session.begin()` on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.

Note that this incurs more overhead in SQLAlchemy than the above means
due to nesting transactions, and it is not possible to implicitly retry
failed database operations when using this approach.

This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.

.. code:: python

def myfunc(foo):
session = sessionmaker()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar

def _private_func(foo, session=None):
if not session:
session = sessionmaker()
with session.begin(subtransaction=True):
# do some other database things
return bar


There are some things which it is best to avoid:

* Don't keep a transaction open any longer than necessary.

This means that your ``with session.begin()`` block should be as short
as possible, while still containing all the related calls for that
transaction.

* Avoid ``with_lockmode('UPDATE')`` when possible.

In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.

One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.

The better long-term solution is to use
``INSERT .. ON DUPLICATE KEY UPDATE``.
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.


Enabling soft deletes:

* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
to your model class. For example:

.. code:: python

class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass


Efficient use of soft deletes:

* There are two possible ways to mark a record as deleted:
`model.soft_delete()` and `query.soft_delete()`.

The `model.soft_delete()` method works with a single already-fetched entry.
`query.soft_delete()` makes only one db request for all entries that
correspond to the query.

* In almost all cases you should use `query.soft_delete()`. Some examples:

.. code:: python

def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
if count == 0:
raise Exception("0 entries were soft deleted")

def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = sessionmaker()
with session.begin(subtransactions=True):
count = (model_query(BarModel).
find(some_condition).
soft_delete(synchronize_session=True))
# Here synchronize_session is required, because we
# don't know what is going on in outer session.
if count == 0:
raise Exception("0 entries were soft deleted")

* There is only one situation where `model.soft_delete()` is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.

.. code:: python

def soft_delete_bar_model():
session = sessionmaker()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)

However, if you need to work with all entries that correspond to the query
and then soft delete them, you should use the `query.soft_delete()` method:

.. code:: python

def soft_delete_multi_models():
session = sessionmaker()
with session.begin():
query = (model_query(BarModel, session=session).
find(some_condition))
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
# synchronize_session=False should be set if there is no outer
# session and these entries are not used after this.

When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using `model.soft_delete()`, as in the following
example, is very inefficient.

.. code:: python

for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.

"""

import functools
import logging
import re
import time

import six
from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column

from nova.openstack.common.db import exception
from nova.openstack.common.gettextutils import _LE, _LW
from nova.openstack.common import timeutils


LOG = logging.getLogger(__name__)


class SqliteForeignKeysListener(PoolListener):
"""Ensures that the foreign key constraints are enforced in SQLite.

The foreign key constraints are disabled by default in SQLite,
so the foreign key constraints will be enabled here for every
database connection
"""
def connect(self, dbapi_con, con_record):
dbapi_con.execute('pragma foreign_keys=ON')


# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# sqlite since 3.7.16:
# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
#
# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
# constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
# constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
#
# ibm_db_sa:
# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
# statement, UPDATE statement, or foreign key update caused by a
# DELETE statement are not valid because the primary key, unique
# constraint or unique index identified by "2" constrains table
# "NOVA.KEY_PAIRS" from having duplicate values for the index
# key.
_DUP_KEY_RE_DB = {
"sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
"postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
"mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
"ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
}
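
# For illustration, a typical MySQL message (the exact text is hypothetical,
# modeled on the structure described above) would match as follows:
#
#     msg = '(IntegrityError) (1062, "Duplicate entry \'x\' for key \'c1\'")'
#     _DUP_KEY_RE_DB["mysql"][0].match(msg).group(1)  # -> 'c1'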


def _raise_if_duplicate_entry_error(integrity_error, engine_name):
"""Raise exception if two entries are duplicated.

In this function will be raised DBDuplicateEntry exception if integrity
error wrap unique constraint violation.
"""

def get_columns_from_uniq_cons_or_name(columns):
        # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2",
        #                  where `t` is the table name and `c1`, `c2`
        #                  are the columns in the UniqueConstraint.
uniqbase = "uniq_"
if not columns.startswith(uniqbase):
if engine_name == "postgresql":
return [columns[columns.index("_") + 1:columns.rindex("_")]]
return [columns]
return columns[len(uniqbase):].split("0")[1:]
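
    # For illustration (names are hypothetical): a constraint named per the
    # convention above, "uniq_user0user_id0address", yields
    # ['user_id', 'address'], while a raw PostgreSQL constraint name such
    # as "users_c1_key" yields ['c1'].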

if engine_name not in ("ibm_db_sa", "mysql", "sqlite", "postgresql"):
return

    # FIXME(johannes): The usage of the .message attribute has been
    # deprecated since Python 2.6. However, the exceptions raised by
    # SQLAlchemy can differ when using unicode() and accessing .message.
    # An audit across all supported engines will be necessary to
    # ensure there are no regressions.
for pattern in _DUP_KEY_RE_DB[engine_name]:
match = pattern.match(integrity_error.message)
if match:
break
else:
return

# NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
# columns so we have to omit that from the DBDuplicateEntry error.
columns = ''

if engine_name != 'ibm_db_sa':
columns = match.group(1)

if engine_name == "sqlite":
columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
else:
columns = get_columns_from_uniq_cons_or_name(columns)
raise exception.DBDuplicateEntry(columns, integrity_error)


# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
# 'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}


def _raise_if_deadlock_error(operational_error, engine_name):
"""Raise exception on deadlock condition.

Raise DBDeadlock exception if OperationalError contains a Deadlock
condition.
"""
    deadlock_re = _DEADLOCK_RE_DB.get(engine_name)
    if deadlock_re is None:
        return
    # FIXME(johannes): The usage of the .message attribute has been
    # deprecated since Python 2.6. However, the exceptions raised by
    # SQLAlchemy can differ when using unicode() and accessing .message.
    # An audit across all supported engines will be necessary to
    # ensure there are no regressions.
    m = deadlock_re.match(operational_error.message)
    if not m:
        return
    raise exception.DBDeadlock(operational_error)


def _wrap_db_error(f):
@functools.wraps(f)
def _wrap(self, *args, **kwargs):
try:
assert issubclass(
self.__class__, sqlalchemy.orm.session.Session
), ('_wrap_db_error() can only be applied to methods of '
'subclasses of sqlalchemy.orm.session.Session.')

return f(self, *args, **kwargs)
except UnicodeEncodeError:
raise exception.DBInvalidUnicodeParameter()
except sqla_exc.OperationalError as e:
_raise_if_db_connection_lost(e, self.bind)
_raise_if_deadlock_error(e, self.bind.dialect.name)
# NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now.
raise
        # note(boris-42): We should catch unique constraint violations and
        # wrap them in our own DBDuplicateEntry exception. Unique constraint
        # violations are raised as IntegrityError.
except sqla_exc.IntegrityError as e:
            # note(boris-42): SQLAlchemy doesn't unify errors from different
            # DBs, so we must do this. Also, some tables (for example
            # instance_types) have more than one unique constraint. This
            # means we should get the names of the columns whose values
            # violate a unique constraint from the error message.
_raise_if_duplicate_entry_error(e, self.bind.dialect.name)
raise exception.DBError(e)
except Exception as e:
LOG.exception(_LE('DB exception wrapped.'))
raise exception.DBError(e)
return _wrap
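

# A minimal sketch of how _wrap_db_error is intended to be used (the
# Session subclass shown here is hypothetical; this module applies the
# decorator to its own Session methods):
#
#     class _WrappedSession(sqlalchemy.orm.session.Session):
#         @_wrap_db_error
#         def flush(self, *args, **kwargs):
#             return super(_WrappedSession, self).flush(*args, **kwargs)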


def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")


def _add_regexp_listener(dbapi_con, con_record):
"""Add REGEXP function to sqlite connections."""

def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
dbapi_con.create_function('regexp', 2, regexp)


def _thread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.

If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
execute instead of time.sleep(0).
Force a context switch. With common database backends (eg MySQLdb and
sqlite), there is no implicit yield caused by network I/O since they are
implemented by C libraries that eventlet cannot monkey patch.
"""
time.sleep(0)


def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
"""Ensures that MySQL, PostgreSQL or DB2 connections are alive.

Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
"""
cursor = dbapi_conn.cursor()
try:
ping_sql = 'select 1'
if engine.name == 'ibm_db_sa':
# DB2 requires a table expression
ping_sql = 'select 1 from (values (1)) AS t1'
cursor.execute(ping_sql)
except Exception as ex:
if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
msg = _LW('Database server has gone away: %s') % ex
LOG.warning(msg)

            # if the database server has gone away, all connections in the
            # pool have become invalid and we can safely close all of them
            # here, rather than waste time checking every single connection
engine.dispose()

# this will be handled by SQLAlchemy and will force it to create
# a new connection and retry the original action
raise sqla_exc.DisconnectionError(msg)
else:
raise


def _set_session_sql_mode(dbapi_con, connection_rec, sql_mode=None):
"""Set the sql_mode session variable.

MySQL supports several server modes. The default is None, but sessions
may choose to enable server modes like TRADITIONAL, ANSI,
several STRICT_* modes and others.

Note: passing in '' (empty string) for sql_mode clears
the SQL mode for the session, overriding a potentially set
server default.
"""

cursor = dbapi_con.cursor()
cursor.execute("SET SESSION sql_mode = %s", [sql_mode])


def _mysql_get_effective_sql_mode(engine):
"""Returns the effective SQL mode for connections from the engine pool.

Returns ``None`` if the mode isn't available, otherwise returns the mode.

"""
# Get the real effective SQL mode. Even when unset by
# our own config, the server may still be operating in a specific
# SQL mode as set by the server configuration.
# Also note that the checkout listener will be called on execute to
# set the mode if it's registered.
row = engine.execute("SHOW VARIABLES LIKE 'sql_mode'").fetchone()
if row is None:
return
return row[1]


def _mysql_check_effective_sql_mode(engine):
"""Logs a message based on the effective SQL mode for MySQL connections."""
realmode = _mysql_get_effective_sql_mode(engine)

if realmode is None:
LOG.warning(_LW('Unable to detect effective SQL mode'))
return

LOG.debug('MySQL server mode set to %s', realmode)
# 'TRADITIONAL' mode enables several other modes, so
# we need a substring match here
if not ('TRADITIONAL' in realmode.upper() or
'STRICT_ALL_TABLES' in realmode.upper()):
LOG.warning(_LW("MySQL SQL mode is '%s', "
"consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
realmode)


def _mysql_set_mode_callback(engine, sql_mode):
if sql_mode is not None:
mode_callback = functools.partial(_set_session_sql_mode,
sql_mode=sql_mode)
sqlalchemy.event.listen(engine, 'connect', mode_callback)
_mysql_check_effective_sql_mode(engine)


def _is_db_connection_error(args):
"""Return True if error in connecting to db."""
    # NOTE(adam_g): This is currently MySQL specific and needs to be extended
    #               to support Postgres and others.
    # For DB2, the error code is -30081, which DB2 returns while the server
    # is not yet available.
conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
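

# For illustration (the message text is hypothetical):
#
#     _is_db_connection_error("(2006, 'MySQL server has gone away')")
#
# returns True, since '2006' is one of the recognized connection error codes.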


def _raise_if_db_connection_lost(error, engine):
    # NOTE(vsergeyev): The function is_disconnect(e, connection, cursor)
    #                  requires a connection and a cursor as parameters, but
    #                  we have no way to create a connection when the DB is
    #                  unavailable, so reconnecting would fail. Since
    #                  is_disconnect() ignores these parameters anyway, it
    #                  makes sense to pass None as placeholders instead of a
    #                  connection and cursor.
if engine.dialect.is_disconnect(error, None, None):
raise exception.DBConnectionError(error)


def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
idle_timeout=3600,
connection_debug=0, max_pool_size=None, max_overflow=None,
pool_timeout=None, sqlite_synchronous=True,
connection_trace=False, max_retries=10, retry_interval=10):
"""Return a new SQLAlchemy engine."""

connection_dict = sqlalchemy.engine.url.make_url(sql_connection)

engine_args = {
"pool_recycle": idle_timeout,
'convert_unicode': True,
}

logger = logging.getLogger('sqlalchemy.engine')

# Map SQL debug level to Python log level
if connection_debug >= 100:
logger.setLevel(logging.DEBUG)
elif connection_debug >= 50:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)

if "sqlite" in connection_dict.drivername:
if sqlite_fk:
engine_args["listeners"] = [SqliteForeignKeysListener()]
engine_args["poolclass"] = NullPool

if sql_connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
else:
if max_pool_size is not None:
engine_args['pool_size'] = max_pool_size
if max_overflow is not None:
engine_args['max_overflow'] = max_overflow
if pool_timeout is not None:
engine_args['pool_timeout'] = pool_timeout

engine = sqlalchemy.create_engine(sql_connection, **engine_args)

sqlalchemy.event.listen(engine, 'checkin', _thread_yield)

if engine.name in ('ibm_db_sa', 'mysql', 'postgresql'):
ping_callback = functools.partial(_ping_listener, engine)
sqlalchemy.event.listen(engine, 'checkout', ping_callback)
if engine.name == 'mysql':
if mysql_sql_mode:
_mysql_set_mode_callback(engine, mysql_sql_mode)
elif 'sqlite' in connection_dict.drivername:
if not sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
_synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
_patch_mysqldb_with_stacktrace_comments()

try:
engine.connect()
except sqla_exc.OperationalError as e:
if not _is_db_connection_error(e.args[0]):
raise

remaining = max_retries
if remaining == -1:
remaining = 'infinite'