db: Use module-level imports for sqlalchemy (for real)

Change If90d9295b231166a28c2cc350d324691821a696b kicked off this effort
but only changed the migrations. This change completes the job.

Change-Id: Ic0f2c326ebce8d7c89b0debf5225cbe471daca03
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
This commit is contained in:
Stephen Finucane 2021-06-15 15:05:17 +01:00
parent e7a7fd51d1
commit 43de2421b3
15 changed files with 1128 additions and 1098 deletions

View File

@ -25,8 +25,8 @@ from oslo_config import cfg
from oslo_upgradecheck import common_checks from oslo_upgradecheck import common_checks
from oslo_upgradecheck import upgradecheck from oslo_upgradecheck import upgradecheck
import pkg_resources import pkg_resources
import sqlalchemy as sa
from sqlalchemy import func as sqlfunc from sqlalchemy import func as sqlfunc
from sqlalchemy import MetaData, Table, select
from nova.cmd import common as cmd_common from nova.cmd import common as cmd_common
import nova.conf import nova.conf
@ -86,10 +86,10 @@ class UpgradeCommands(upgradecheck.UpgradeCommands):
# table, or by only counting compute nodes with a service version of at # table, or by only counting compute nodes with a service version of at
# least 15 which was the highest service version when Newton was # least 15 which was the highest service version when Newton was
# released. # released.
meta = MetaData(bind=db_session.get_engine(context=context)) meta = sa.MetaData(bind=db_session.get_engine(context=context))
compute_nodes = Table('compute_nodes', meta, autoload=True) compute_nodes = sa.Table('compute_nodes', meta, autoload=True)
return select([sqlfunc.count()]).select_from(compute_nodes).where( return sa.select([sqlfunc.count()]).select_from(compute_nodes).where(
compute_nodes.c.deleted == 0).scalar() compute_nodes.c.deleted == 0).scalar()
def _check_cellsv2(self): def _check_cellsv2(self):
"""Checks to see if cells v2 has been setup. """Checks to see if cells v2 has been setup.
@ -102,7 +102,7 @@ class UpgradeCommands(upgradecheck.UpgradeCommands):
this on an initial install. This also has to be careful about checking this on an initial install. This also has to be careful about checking
for compute nodes if there are no host mappings on a fresh install. for compute nodes if there are no host mappings on a fresh install.
""" """
meta = MetaData() meta = sa.MetaData()
meta.bind = db_session.get_api_engine() meta.bind = db_session.get_api_engine()
cell_mappings = self._get_cell_mappings() cell_mappings = self._get_cell_mappings()
@ -122,8 +122,9 @@ class UpgradeCommands(upgradecheck.UpgradeCommands):
'retry.') 'retry.')
return upgradecheck.Result(upgradecheck.Code.FAILURE, msg) return upgradecheck.Result(upgradecheck.Code.FAILURE, msg)
host_mappings = Table('host_mappings', meta, autoload=True) host_mappings = sa.Table('host_mappings', meta, autoload=True)
count = select([sqlfunc.count()]).select_from(host_mappings).scalar() count = sa.select([sqlfunc.count()]).select_from(host_mappings)\
.scalar()
if count == 0: if count == 0:
# This may be a fresh install in which case there may not be any # This may be a fresh install in which case there may not be any
# compute_nodes in the cell database if the nova-compute service # compute_nodes in the cell database if the nova-compute service

View File

@ -35,26 +35,12 @@ from oslo_utils import importutils
from oslo_utils import timeutils from oslo_utils import timeutils
from oslo_utils import uuidutils from oslo_utils import uuidutils
import sqlalchemy as sa import sqlalchemy as sa
from sqlalchemy import and_ from sqlalchemy import exc as sqla_exc
from sqlalchemy import Boolean from sqlalchemy import orm
from sqlalchemy.exc import NoSuchTableError from sqlalchemy import schema
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import aliased
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import noload
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm import undefer
from sqlalchemy.schema import Table
from sqlalchemy import sql from sqlalchemy import sql
from sqlalchemy.sql.expression import asc from sqlalchemy.sql import expression
from sqlalchemy.sql.expression import cast
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql import false
from sqlalchemy.sql import func from sqlalchemy.sql import func
from sqlalchemy.sql import null
from sqlalchemy.sql import true
from nova import block_device from nova import block_device
from nova.compute import task_states from nova.compute import task_states
@ -94,7 +80,7 @@ def _context_manager_from_context(context):
def _joinedload_all(column): def _joinedload_all(column):
elements = column.split('.') elements = column.split('.')
joined = joinedload(elements.pop(0)) joined = orm.joinedload(elements.pop(0))
for element in elements: for element in elements:
joined = joined.joinedload(element) joined = joined.joinedload(element)
@ -293,9 +279,10 @@ def model_query(
# us to return both our projects and unowned projects. # us to return both our projects and unowned projects.
if nova.context.is_user_context(context) and project_only: if nova.context.is_user_context(context) and project_only:
if project_only == 'allow_none': if project_only == 'allow_none':
query = query.\ query = query.filter(sql.or_(
filter(or_(model.project_id == context.project_id, model.project_id == context.project_id,
model.project_id == null())) model.project_id == sql.null()
))
else: else:
query = query.filter_by(project_id=context.project_id) query = query.filter_by(project_id=context.project_id)
@ -367,7 +354,7 @@ class EqualityCondition(object):
def clauses(self, field): def clauses(self, field):
# method signature requires us to return an iterable even if for OR # method signature requires us to return an iterable even if for OR
# operator this will actually be a single clause # operator this will actually be a single clause
return [or_(*[field == value for value in self.values])] return [sql.or_(*[field == value for value in self.values])]
class InequalityCondition(object): class InequalityCondition(object):
@ -395,9 +382,10 @@ def service_destroy(context, service_id):
# TODO(sbauza): Remove the service_id filter in a later release # TODO(sbauza): Remove the service_id filter in a later release
# once we are sure that all compute nodes report the host field # once we are sure that all compute nodes report the host field
model_query(context, models.ComputeNode).\ model_query(context, models.ComputeNode).\
filter(or_(models.ComputeNode.service_id == service_id, filter(sql.or_(
models.ComputeNode.host == service['host'])).\ models.ComputeNode.service_id == service_id,
soft_delete(synchronize_session=False) models.ComputeNode.host == service['host'])).\
soft_delete(synchronize_session=False)
@pick_context_manager_reader @pick_context_manager_reader
@ -434,7 +422,7 @@ def service_get_minimum_version(context, binaries):
func.min(models.Service.version)).\ func.min(models.Service.version)).\
filter(models.Service.binary.in_(binaries)).\ filter(models.Service.binary.in_(binaries)).\
filter(models.Service.deleted == 0).\ filter(models.Service.deleted == 0).\
filter(models.Service.forced_down == false()).\ filter(models.Service.forced_down == sql.false()).\
group_by(models.Service.binary) group_by(models.Service.binary)
return dict(min_versions) return dict(min_versions)
@ -615,7 +603,7 @@ def _compute_node_select(context, filters=None, limit=None, marker=None):
select = select.limit(limit) select = select.limit(limit)
# Explicitly order by id, so we're not dependent on the native sort # Explicitly order by id, so we're not dependent on the native sort
# order of the underlying DB. # order of the underlying DB.
select = select.order_by(asc("id")) select = select.order_by(expression.asc("id"))
return select return select
@ -921,7 +909,7 @@ def compute_node_statistics(context):
inner_sel.c.host == services_tbl.c.host, inner_sel.c.host == services_tbl.c.host,
inner_sel.c.service_id == services_tbl.c.id inner_sel.c.service_id == services_tbl.c.id
), ),
services_tbl.c.disabled == false(), services_tbl.c.disabled == sql.false(),
services_tbl.c.binary == 'nova-compute', services_tbl.c.binary == 'nova-compute',
services_tbl.c.deleted == 0 services_tbl.c.deleted == 0
) )
@ -1106,9 +1094,9 @@ def virtual_interface_get_by_instance(context, instance_uuid):
:param instance_uuid: UUID of the instance to filter on. :param instance_uuid: UUID of the instance to filter on.
""" """
vif_refs = _virtual_interface_query(context).\ vif_refs = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\ filter_by(instance_uuid=instance_uuid).\
order_by(asc("created_at"), asc("id")).\ order_by(expression.asc("created_at"), expression.asc("id")).\
all() all()
return vif_refs return vif_refs
@ -1414,7 +1402,7 @@ def instance_get(context, instance_id, columns_to_join=None):
def _build_instance_get(context, columns_to_join=None): def _build_instance_get(context, columns_to_join=None):
query = model_query(context, models.Instance, project_only=True).\ query = model_query(context, models.Instance, project_only=True).\
options(_joinedload_all('security_groups.rules')).\ options(_joinedload_all('security_groups.rules')).\
options(joinedload('info_cache')) options(orm.joinedload('info_cache'))
if columns_to_join is None: if columns_to_join is None:
columns_to_join = ['metadata', 'system_metadata'] columns_to_join = ['metadata', 'system_metadata']
for column in columns_to_join: for column in columns_to_join:
@ -1422,7 +1410,7 @@ def _build_instance_get(context, columns_to_join=None):
# Already always joined above # Already always joined above
continue continue
if 'extra.' in column: if 'extra.' in column:
query = query.options(undefer(column)) query = query.options(orm.undefer(column))
elif column in ['metadata', 'system_metadata']: elif column in ['metadata', 'system_metadata']:
# NOTE(melwitt): We use subqueryload() instead of joinedload() for # NOTE(melwitt): We use subqueryload() instead of joinedload() for
# metadata and system_metadata because of the one-to-many # metadata and system_metadata because of the one-to-many
@ -1432,13 +1420,13 @@ def _build_instance_get(context, columns_to_join=None):
# in a large data transfer. Instead, the subqueryload() will # in a large data transfer. Instead, the subqueryload() will
# perform additional queries to obtain metadata and system_metadata # perform additional queries to obtain metadata and system_metadata
# for the instance. # for the instance.
query = query.options(subqueryload(column)) query = query.options(orm.subqueryload(column))
else: else:
query = query.options(joinedload(column)) query = query.options(orm.joinedload(column))
# NOTE(alaski) Stop lazy loading of columns not needed. # NOTE(alaski) Stop lazy loading of columns not needed.
for col in ['metadata', 'system_metadata']: for col in ['metadata', 'system_metadata']:
if col not in columns_to_join: if col not in columns_to_join:
query = query.options(noload(col)) query = query.options(orm.noload(col))
# NOTE(melwitt): We need to use order_by(<unique column>) so that the # NOTE(melwitt): We need to use order_by(<unique column>) so that the
# additional queries emitted by subqueryload() include the same ordering as # additional queries emitted by subqueryload() include the same ordering as
# used by the parent query. # used by the parent query.
@ -1531,7 +1519,7 @@ def instance_get_all(context, columns_to_join=None):
_manual_join_columns(columns_to_join)) _manual_join_columns(columns_to_join))
query = model_query(context, models.Instance) query = model_query(context, models.Instance)
for column in columns_to_join_new: for column in columns_to_join_new:
query = query.options(joinedload(column)) query = query.options(orm.joinedload(column))
if not context.is_admin: if not context.is_admin:
# If we're not admin context, add appropriate filter.. # If we're not admin context, add appropriate filter..
if context.project_id: if context.project_id:
@ -1673,9 +1661,9 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
query_prefix = context.session.query(models.Instance) query_prefix = context.session.query(models.Instance)
for column in columns_to_join_new: for column in columns_to_join_new:
if 'extra.' in column: if 'extra.' in column:
query_prefix = query_prefix.options(undefer(column)) query_prefix = query_prefix.options(orm.undefer(column))
else: else:
query_prefix = query_prefix.options(joinedload(column)) query_prefix = query_prefix.options(orm.joinedload(column))
# Note: order_by is done in the sqlalchemy.utils.py paginate_query(), # Note: order_by is done in the sqlalchemy.utils.py paginate_query(),
# no need to do it here as well # no need to do it here as well
@ -1695,7 +1683,7 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
deleted = filters.pop('deleted') deleted = filters.pop('deleted')
if deleted: if deleted:
if filters.pop('soft_deleted', True): if filters.pop('soft_deleted', True):
delete = or_( delete = sql.or_(
models.Instance.deleted == models.Instance.id, models.Instance.deleted == models.Instance.id,
models.Instance.vm_state == vm_states.SOFT_DELETED models.Instance.vm_state == vm_states.SOFT_DELETED
) )
@ -1710,10 +1698,10 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
if not filters.pop('soft_deleted', False): if not filters.pop('soft_deleted', False):
# It would be better to have vm_state not be nullable # It would be better to have vm_state not be nullable
# but until then we test it explicitly as a workaround. # but until then we test it explicitly as a workaround.
not_soft_deleted = or_( not_soft_deleted = sql.or_(
models.Instance.vm_state != vm_states.SOFT_DELETED, models.Instance.vm_state != vm_states.SOFT_DELETED,
models.Instance.vm_state == null() models.Instance.vm_state == sql.null()
) )
query_prefix = query_prefix.filter(not_soft_deleted) query_prefix = query_prefix.filter(not_soft_deleted)
if 'cleaned' in filters: if 'cleaned' in filters:
@ -1730,14 +1718,14 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
query_prefix = query_prefix.filter(models.Tag.tag == first_tag) query_prefix = query_prefix.filter(models.Tag.tag == first_tag)
for tag in tags: for tag in tags:
tag_alias = aliased(models.Tag) tag_alias = orm.aliased(models.Tag)
query_prefix = query_prefix.join(tag_alias, query_prefix = query_prefix.join(tag_alias,
models.Instance.tags) models.Instance.tags)
query_prefix = query_prefix.filter(tag_alias.tag == tag) query_prefix = query_prefix.filter(tag_alias.tag == tag)
if 'tags-any' in filters: if 'tags-any' in filters:
tags = filters.pop('tags-any') tags = filters.pop('tags-any')
tag_alias = aliased(models.Tag) tag_alias = orm.aliased(models.Tag)
query_prefix = query_prefix.join(tag_alias, models.Instance.tags) query_prefix = query_prefix.join(tag_alias, models.Instance.tags)
query_prefix = query_prefix.filter(tag_alias.tag.in_(tags)) query_prefix = query_prefix.filter(tag_alias.tag.in_(tags))
@ -1749,7 +1737,7 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
subq = subq.filter(models.Tag.tag == first_tag) subq = subq.filter(models.Tag.tag == first_tag)
for tag in tags: for tag in tags:
tag_alias = aliased(models.Tag) tag_alias = orm.aliased(models.Tag)
subq = subq.join(tag_alias, models.Instance.tags) subq = subq.join(tag_alias, models.Instance.tags)
subq = subq.filter(tag_alias.tag == tag) subq = subq.filter(tag_alias.tag == tag)
@ -1768,14 +1756,15 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
filters['user_id'] = context.user_id filters['user_id'] = context.user_id
if filters.pop('hidden', False): if filters.pop('hidden', False):
query_prefix = query_prefix.filter(models.Instance.hidden == true()) query_prefix = query_prefix.filter(
models.Instance.hidden == sql.true())
else: else:
# If the query should not include hidden instances, then # If the query should not include hidden instances, then
# filter instances with hidden=False or hidden=NULL because # filter instances with hidden=False or hidden=NULL because
# older records may have no value set. # older records may have no value set.
query_prefix = query_prefix.filter(or_( query_prefix = query_prefix.filter(sql.or_(
models.Instance.hidden == false(), models.Instance.hidden == sql.false(),
models.Instance.hidden == null())) models.Instance.hidden == sql.null()))
# Filters for exact matches that we can do along with the SQL query... # Filters for exact matches that we can do along with the SQL query...
# For other filters that don't match this, we will do regexp matching # For other filters that don't match this, we will do regexp matching
@ -1856,9 +1845,9 @@ def _model_get_uuid_by_sort_filters(context, model, sort_keys, sort_dirs,
for skey, sdir, val in zip(sort_keys, sort_dirs, values): for skey, sdir, val in zip(sort_keys, sort_dirs, values):
# Apply ordering to our query for the key, direction we're processing # Apply ordering to our query for the key, direction we're processing
if sdir == 'desc': if sdir == 'desc':
query = query.order_by(desc(getattr(model, skey))) query = query.order_by(expression.desc(getattr(model, skey)))
else: else:
query = query.order_by(asc(getattr(model, skey))) query = query.order_by(expression.asc(getattr(model, skey)))
# Build a list of equivalence requirements on keys we've already # Build a list of equivalence requirements on keys we've already
# processed through the loop. In other words, if we're adding # processed through the loop. In other words, if we're adding
@ -1869,8 +1858,8 @@ def _model_get_uuid_by_sort_filters(context, model, sort_keys, sort_dirs,
(getattr(model, sort_keys[equal_attr]) == values[equal_attr])) (getattr(model, sort_keys[equal_attr]) == values[equal_attr]))
model_attr = getattr(model, skey) model_attr = getattr(model, skey)
if isinstance(model_attr.type, Boolean): if isinstance(model_attr.type, sa.Boolean):
model_attr = cast(model_attr, Integer) model_attr = expression.cast(model_attr, sa.Integer)
val = int(val) val = int(val)
if skey == sort_keys[-1]: if skey == sort_keys[-1]:
@ -1890,11 +1879,11 @@ def _model_get_uuid_by_sort_filters(context, model, sort_keys, sort_dirs,
# AND together all the above # AND together all the above
crit_attrs.append(crit) crit_attrs.append(crit)
criteria.append(and_(*crit_attrs)) criteria.append(sql.and_(*crit_attrs))
key_index += 1 key_index += 1
# OR together all the ANDs # OR together all the ANDs
query = query.filter(or_(*criteria)) query = query.filter(sql.or_(*criteria))
# We can't raise InstanceNotFound because we don't have a uuid to # We can't raise InstanceNotFound because we don't have a uuid to
# be looking for, so just return nothing if no match. # be looking for, so just return nothing if no match.
@ -2130,12 +2119,13 @@ def instance_get_active_by_window_joined(context, begin, end=None,
for column in columns_to_join_new: for column in columns_to_join_new:
if 'extra.' in column: if 'extra.' in column:
query = query.options(undefer(column)) query = query.options(orm.undefer(column))
else: else:
query = query.options(joinedload(column)) query = query.options(orm.joinedload(column))
query = query.filter(or_(models.Instance.terminated_at == null(), query = query.filter(sql.or_(
models.Instance.terminated_at > begin)) models.Instance.terminated_at == sql.null(),
models.Instance.terminated_at > begin))
if end: if end:
query = query.filter(models.Instance.launched_at < end) query = query.filter(models.Instance.launched_at < end)
if project_id: if project_id:
@ -2165,9 +2155,9 @@ def _instance_get_all_query(context, project_only=False, joins=None):
project_only=project_only) project_only=project_only)
for column in joins: for column in joins:
if 'extra.' in column: if 'extra.' in column:
query = query.options(undefer(column)) query = query.options(orm.undefer(column))
else: else:
query = query.options(joinedload(column)) query = query.options(orm.joinedload(column))
return query return query
@ -2581,7 +2571,7 @@ def instance_extra_get_by_instance_uuid(
columns = ['numa_topology', 'pci_requests', 'flavor', 'vcpu_model', columns = ['numa_topology', 'pci_requests', 'flavor', 'vcpu_model',
'trusted_certs', 'resources', 'migration_context'] 'trusted_certs', 'resources', 'migration_context']
for column in columns: for column in columns:
query = query.options(undefer(column)) query = query.options(orm.undefer(column))
instance_extra = query.first() instance_extra = query.first()
return instance_extra return instance_extra
@ -2875,7 +2865,7 @@ def _block_device_mapping_get_query(context, columns_to_join=None):
query = model_query(context, models.BlockDeviceMapping) query = model_query(context, models.BlockDeviceMapping)
for column in columns_to_join: for column in columns_to_join:
query = query.options(joinedload(column)) query = query.options(orm.joinedload(column))
return query return query
@ -3408,10 +3398,13 @@ def migration_get_in_progress_by_host_and_node(context, host, node):
# and the instance is in VERIFY_RESIZE state, so the end state # and the instance is in VERIFY_RESIZE state, so the end state
# for a resize is actually 'confirmed' or 'reverted'. # for a resize is actually 'confirmed' or 'reverted'.
return model_query(context, models.Migration).\ return model_query(context, models.Migration).\
filter(or_(and_(models.Migration.source_compute == host, filter(sql.or_(
models.Migration.source_node == node), sql.and_(
and_(models.Migration.dest_compute == host, models.Migration.source_compute == host,
models.Migration.dest_node == node))).\ models.Migration.source_node == node),
sql.and_(
models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(~models.Migration.status.in_(['confirmed', 'reverted', filter(~models.Migration.status.in_(['confirmed', 'reverted',
'error', 'failed', 'error', 'failed',
'completed', 'cancelled', 'completed', 'cancelled',
@ -3465,15 +3458,17 @@ def migration_get_all_by_filters(context, filters,
query = query.filter(models.Migration.status.in_(status)) query = query.filter(models.Migration.status.in_(status))
if "host" in filters: if "host" in filters:
host = filters["host"] host = filters["host"]
query = query.filter(or_(models.Migration.source_compute == host, query = query.filter(sql.or_(
models.Migration.dest_compute == host)) models.Migration.source_compute == host,
models.Migration.dest_compute == host))
elif "source_compute" in filters: elif "source_compute" in filters:
host = filters['source_compute'] host = filters['source_compute']
query = query.filter(models.Migration.source_compute == host) query = query.filter(models.Migration.source_compute == host)
if "node" in filters: if "node" in filters:
node = filters['node'] node = filters['node']
query = query.filter(or_(models.Migration.source_node == node, query = query.filter(sql.or_(
models.Migration.dest_node == node)) models.Migration.source_node == node,
models.Migration.dest_node == node))
if "migration_type" in filters: if "migration_type" in filters:
migtype = filters["migration_type"] migtype = filters["migration_type"]
query = query.filter(models.Migration.migration_type == migtype) query = query.filter(models.Migration.migration_type == migtype)
@ -3551,10 +3546,13 @@ def migration_get_in_progress_and_error_by_host_and_node(context, host, node):
host and node. host and node.
""" """
return model_query(context, models.Migration).\ return model_query(context, models.Migration).\
filter(or_(and_(models.Migration.source_compute == host, filter(sql.or_(
models.Migration.source_node == node), sql.and_(
and_(models.Migration.dest_compute == host, models.Migration.source_compute == host,
models.Migration.dest_node == node))).\ models.Migration.source_node == node),
sql.and_(
models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(~models.Migration.status.in_(['confirmed', 'reverted', filter(~models.Migration.status.in_(['confirmed', 'reverted',
'failed', 'completed', 'failed', 'completed',
'cancelled', 'done'])).\ 'cancelled', 'done'])).\
@ -3738,11 +3736,11 @@ def bw_usage_update(
# same record is updated every time. It can be removed after adding # same record is updated every time. It can be removed after adding
# unique constraint to this model. # unique constraint to this model.
bw_usage = model_query(context, models.BandwidthUsage, bw_usage = model_query(context, models.BandwidthUsage,
read_deleted='yes').\ read_deleted='yes').\
filter_by(start_period=ts_values['start_period']).\ filter_by(start_period=ts_values['start_period']).\
filter_by(uuid=uuid).\ filter_by(uuid=uuid).\
filter_by(mac=mac).\ filter_by(mac=mac).\
order_by(asc(models.BandwidthUsage.id)).first() order_by(expression.asc(models.BandwidthUsage.id)).first()
if bw_usage: if bw_usage:
bw_usage.update(values) bw_usage.update(values)
@ -3770,11 +3768,12 @@ def bw_usage_update(
def vol_get_usage_by_time(context, begin): def vol_get_usage_by_time(context, begin):
"""Return volumes usage that have been updated after a specified time.""" """Return volumes usage that have been updated after a specified time."""
return model_query(context, models.VolumeUsage, read_deleted="yes").\ return model_query(context, models.VolumeUsage, read_deleted="yes").\
filter(or_(models.VolumeUsage.tot_last_refreshed == null(), filter(sql.or_(
models.VolumeUsage.tot_last_refreshed > begin, models.VolumeUsage.tot_last_refreshed == sql.null(),
models.VolumeUsage.curr_last_refreshed == null(), models.VolumeUsage.tot_last_refreshed > begin,
models.VolumeUsage.curr_last_refreshed > begin, models.VolumeUsage.curr_last_refreshed == sql.null(),
)).all() models.VolumeUsage.curr_last_refreshed > begin,
)).all()
@require_context @require_context
@ -3992,8 +3991,9 @@ def instance_fault_get_by_instance_uuids(
query = query.join(latest_faults, query = query.join(latest_faults,
faults_tbl.c.id == latest_faults.c.max_id) faults_tbl.c.id == latest_faults.c.max_id)
else: else:
query = query.filter(models.InstanceFault.instance_uuid.in_( query = query.filter(
instance_uuids)).order_by(desc("id")) models.InstanceFault.instance_uuid.in_(instance_uuids)
).order_by(expression.desc("id"))
output = {} output = {}
for instance_uuid in instance_uuids: for instance_uuid in instance_uuids:
@ -4071,18 +4071,18 @@ def action_get_by_request_id(context, instance_uuid, request_id):
def _action_get_by_request_id(context, instance_uuid, request_id): def _action_get_by_request_id(context, instance_uuid, request_id):
result = model_query(context, models.InstanceAction).\ result = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=instance_uuid).\ filter_by(instance_uuid=instance_uuid).\
filter_by(request_id=request_id).\ filter_by(request_id=request_id).\
order_by(desc("created_at"), desc("id")).\ order_by(expression.desc("created_at"), expression.desc("id")).\
first() first()
return result return result
def _action_get_last_created_by_instance_uuid(context, instance_uuid): def _action_get_last_created_by_instance_uuid(context, instance_uuid):
result = (model_query(context, models.InstanceAction). result = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=instance_uuid). filter_by(instance_uuid=instance_uuid).\
order_by(desc("created_at"), desc("id")). order_by(expression.desc("created_at"), expression.desc("id")).\
first()) first()
return result return result
@ -4180,9 +4180,9 @@ def action_event_finish(context, values):
def action_events_get(context, action_id): def action_events_get(context, action_id):
"""Get the events by action id.""" """Get the events by action id."""
events = model_query(context, models.InstanceActionEvent).\ events = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\ filter_by(action_id=action_id).\
order_by(desc("created_at"), desc("id")).\ order_by(expression.desc("created_at"), expression.desc("id")).\
all() all()
return events return events
@ -4376,9 +4376,9 @@ def _get_fk_stmts(metadata, conn, table, column, records):
# Create the shadow table for the referencing table. # Create the shadow table for the referencing table.
fk_shadow_tablename = _SHADOW_TABLE_PREFIX + fk_table.name fk_shadow_tablename = _SHADOW_TABLE_PREFIX + fk_table.name
try: try:
fk_shadow_table = Table(fk_shadow_tablename, metadata, fk_shadow_table = schema.Table(
autoload=True) fk_shadow_tablename, metadata, autoload=True)
except NoSuchTableError: except sqla_exc.NoSuchTableError:
# No corresponding shadow table; skip it. # No corresponding shadow table; skip it.
continue continue
@ -4472,8 +4472,8 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before,
rows_archived = 0 rows_archived = 0
deleted_instance_uuids = [] deleted_instance_uuids = []
try: try:
shadow_table = Table(shadow_tablename, metadata, autoload=True) shadow_table = schema.Table(shadow_tablename, metadata, autoload=True)
except NoSuchTableError: except sqla_exc.NoSuchTableError:
# No corresponding shadow table; skip it. # No corresponding shadow table; skip it.
return rows_archived, deleted_instance_uuids, {} return rows_archived, deleted_instance_uuids, {}
@ -4589,7 +4589,7 @@ def archive_deleted_rows(context=None, max_rows=None, before=None,
table_to_rows_archived = collections.defaultdict(int) table_to_rows_archived = collections.defaultdict(int)
deleted_instance_uuids = [] deleted_instance_uuids = []
total_rows_archived = 0 total_rows_archived = 0
meta = MetaData(get_engine(use_slave=True, context=context)) meta = sa.MetaData(get_engine(use_slave=True, context=context))
meta.reflect() meta.reflect()
# Get the sorted list of tables in order of foreign key dependency. # Get the sorted list of tables in order of foreign key dependency.
# Process the parent tables and find their dependent records in order to # Process the parent tables and find their dependent records in order to
@ -4634,7 +4634,7 @@ def _purgeable_tables(metadata):
def purge_shadow_tables(context, before_date, status_fn=None): def purge_shadow_tables(context, before_date, status_fn=None):
engine = get_engine(context=context) engine = get_engine(context=context)
conn = engine.connect() conn = engine.connect()
metadata = MetaData() metadata = sa.MetaData()
metadata.bind = engine metadata.bind = engine
metadata.reflect() metadata.reflect()
total_deleted = 0 total_deleted = 0

View File

@ -13,28 +13,18 @@
from oslo_db.sqlalchemy import models from oslo_db.sqlalchemy import models
from oslo_log import log as logging from oslo_log import log as logging
from sqlalchemy import Boolean import sqlalchemy as sa
from sqlalchemy import Column import sqlalchemy.dialects.mysql
from sqlalchemy import DateTime
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy import Enum
from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import orm from sqlalchemy import orm
from sqlalchemy.orm import backref
from sqlalchemy import schema from sqlalchemy import schema
from sqlalchemy import String
from sqlalchemy import Text
from sqlalchemy import Unicode
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
def MediumText(): def MediumText():
return Text().with_variant(MEDIUMTEXT(), 'mysql') return sa.Text().with_variant(
sqlalchemy.dialects.mysql.MEDIUMTEXT(), 'mysql')
class _NovaAPIBase(models.ModelBase, models.TimestampMixin): class _NovaAPIBase(models.ModelBase, models.TimestampMixin):
@ -52,9 +42,10 @@ class AggregateHost(API_BASE):
name="uniq_aggregate_hosts0host0aggregate_id" name="uniq_aggregate_hosts0host0aggregate_id"
), ),
) )
id = Column(Integer, primary_key=True, autoincrement=True) id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
host = Column(String(255)) host = sa.Column(sa.String(255))
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False) aggregate_id = sa.Column(
sa.Integer, sa.ForeignKey('aggregates.id'), nullable=False)
class AggregateMetadata(API_BASE): class AggregateMetadata(API_BASE):
@ -64,30 +55,33 @@ class AggregateMetadata(API_BASE):
schema.UniqueConstraint("aggregate_id", "key", schema.UniqueConstraint("aggregate_id", "key",
name="uniq_aggregate_metadata0aggregate_id0key" name="uniq_aggregate_metadata0aggregate_id0key"
), ),
Index('aggregate_metadata_key_idx', 'key'), sa.Index('aggregate_metadata_key_idx', 'key'),
) )
id = Column(Integer, primary_key=True) id = sa.Column(sa.Integer, primary_key=True)
key = Column(String(255), nullable=False) key = sa.Column(sa.String(255), nullable=False)
value = Column(String(255), nullable=False) value = sa.Column(sa.String(255), nullable=False)
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False) aggregate_id = sa.Column(
sa.Integer, sa.ForeignKey('aggregates.id'), nullable=False)
class Aggregate(API_BASE): class Aggregate(API_BASE):
"""Represents a cluster of hosts that exists in this zone.""" """Represents a cluster of hosts that exists in this zone."""
__tablename__ = 'aggregates' __tablename__ = 'aggregates'
__table_args__ = (Index('aggregate_uuid_idx', 'uuid'), __table_args__ = (
schema.UniqueConstraint( sa.Index('aggregate_uuid_idx', 'uuid'),
"name", name="uniq_aggregate0name") schema.UniqueConstraint("name", name="uniq_aggregate0name")
) )
id = Column(Integer, primary_key=True, autoincrement=True) id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
uuid = Column(String(36)) uuid = sa.Column(sa.String(36))
name = Column(String(255)) name = sa.Column(sa.String(255))
_hosts = orm.relationship(AggregateHost, _hosts = orm.relationship(
primaryjoin='Aggregate.id == AggregateHost.aggregate_id', AggregateHost,
cascade='delete') primaryjoin='Aggregate.id == AggregateHost.aggregate_id',
_metadata = orm.relationship(AggregateMetadata, cascade='delete')
primaryjoin='Aggregate.id == AggregateMetadata.aggregate_id', _metadata = orm.relationship(
cascade='delete') AggregateMetadata,
primaryjoin='Aggregate.id == AggregateMetadata.aggregate_id',
cascade='delete')
@property @property
def _extra_keys(self): def _extra_keys(self):
@ -111,59 +105,67 @@ class Aggregate(API_BASE):
class CellMapping(API_BASE): class CellMapping(API_BASE):
"""Contains information on communicating with a cell""" """Contains information on communicating with a cell"""
__tablename__ = 'cell_mappings' __tablename__ = 'cell_mappings'
__table_args__ = (Index('uuid_idx', 'uuid'), __table_args__ = (
schema.UniqueConstraint('uuid', sa.Index('uuid_idx', 'uuid'),
name='uniq_cell_mappings0uuid')) schema.UniqueConstraint('uuid', name='uniq_cell_mappings0uuid'),
)
id = Column(Integer, primary_key=True) id = sa.Column(sa.Integer, primary_key=True)
uuid = Column(String(36), nullable=False) uuid = sa.Column(sa.String(36), nullable=False)
name = Column(String(255)) name = sa.Column(sa.String(255))
transport_url = Column(Text()) transport_url = sa.Column(sa.Text())
database_connection = Column(Text()) database_connection = sa.Column(sa.Text())
disabled = Column(Boolean, default=False) disabled = sa.Column(sa.Boolean, default=False)
host_mapping = orm.relationship('HostMapping', host_mapping = orm.relationship(
backref=backref('cell_mapping', uselist=False), 'HostMapping',
foreign_keys=id, backref=orm.backref('cell_mapping', uselist=False),
primaryjoin=( foreign_keys=id,
'CellMapping.id == HostMapping.cell_id')) primaryjoin='CellMapping.id == HostMapping.cell_id')
class InstanceMapping(API_BASE): class InstanceMapping(API_BASE):
"""Contains the mapping of an instance to which cell it is in""" """Contains the mapping of an instance to which cell it is in"""
__tablename__ = 'instance_mappings' __tablename__ = 'instance_mappings'
__table_args__ = (Index('project_id_idx', 'project_id'), __table_args__ = (
Index('instance_uuid_idx', 'instance_uuid'), sa.Index('project_id_idx', 'project_id'),
schema.UniqueConstraint('instance_uuid', sa.Index('instance_uuid_idx', 'instance_uuid'),
name='uniq_instance_mappings0instance_uuid'), schema.UniqueConstraint(
Index('instance_mappings_user_id_project_id_idx', 'instance_uuid', name='uniq_instance_mappings0instance_uuid'),
'user_id', 'project_id')) sa.Index(
'instance_mappings_user_id_project_id_idx',
'user_id',
'project_id',
),
)
id = Column(Integer, primary_key=True) id = sa.Column(sa.Integer, primary_key=True)
instance_uuid = Column(String(36), nullable=False) instance_uuid = sa.Column(sa.String(36), nullable=False)
cell_id = Column(Integer, ForeignKey('cell_mappings.id'), cell_id = sa.Column(
nullable=True) sa.Integer, sa.ForeignKey('cell_mappings.id'), nullable=True)
project_id = Column(String(255), nullable=False) project_id = sa.Column(sa.String(255), nullable=False)
# FIXME(melwitt): This should eventually be non-nullable, but we need a # FIXME(melwitt): This should eventually be non-nullable, but we need a
# transition period first. # transition period first.
user_id = Column(String(255), nullable=True) user_id = sa.Column(sa.String(255), nullable=True)
queued_for_delete = Column(Boolean) queued_for_delete = sa.Column(sa.Boolean)
cell_mapping = orm.relationship('CellMapping', cell_mapping = orm.relationship(
backref=backref('instance_mapping', uselist=False), 'CellMapping',
foreign_keys=cell_id, backref=orm.backref('instance_mapping', uselist=False),
primaryjoin=('InstanceMapping.cell_id == CellMapping.id')) foreign_keys=cell_id,
primaryjoin='InstanceMapping.cell_id == CellMapping.id')
class HostMapping(API_BASE): class HostMapping(API_BASE):
"""Contains mapping of a compute host to which cell it is in""" """Contains mapping of a compute host to which cell it is in"""
__tablename__ = "host_mappings" __tablename__ = "host_mappings"
__table_args__ = (Index('host_idx', 'host'), __table_args__ = (
schema.UniqueConstraint('host', sa.Index('host_idx', 'host'),
name='uniq_host_mappings0host')) schema.UniqueConstraint('host', name='uniq_host_mappings0host'),
)
id = Column(Integer, primary_key=True) id = sa.Column(sa.Integer, primary_key=True)
cell_id = Column(Integer, ForeignKey('cell_mappings.id'), cell_id = sa.Column(
nullable=False) sa.Integer, sa.ForeignKey('cell_mappings.id'), nullable=False)
host = Column(String(255), nullable=False) host = sa.Column(sa.String(255), nullable=False)
class RequestSpec(API_BASE): class RequestSpec(API_BASE):
@ -171,14 +173,14 @@ class RequestSpec(API_BASE):
__tablename__ = 'request_specs' __tablename__ = 'request_specs'
__table_args__ = ( __table_args__ = (
Index('request_spec_instance_uuid_idx', 'instance_uuid'), sa.Index('request_spec_instance_uuid_idx', 'instance_uuid'),
schema.UniqueConstraint('instance_uuid', schema.UniqueConstraint(
name='uniq_request_specs0instance_uuid'), 'instance_uuid', name='uniq_request_specs0instance_uuid'),
) )
id = Column(Integer, primary_key=True) id = sa.Column(sa.Integer, primary_key=True)
instance_uuid = Column(String(36), nullable=False) instance_uuid = sa.Column(sa.String(36), nullable=False)
spec = Column(MediumText(), nullable=False) spec = sa.Column(MediumText(), nullable=False)
class Flavors(API_BASE): class Flavors(API_BASE):
@ -188,39 +190,40 @@ class Flavors(API_BASE):
schema.UniqueConstraint("flavorid", name="uniq_flavors0flavorid"), schema.UniqueConstraint("flavorid", name="uniq_flavors0flavorid"),
schema.UniqueConstraint("name", name="uniq_flavors0name")) schema.UniqueConstraint("name", name="uniq_flavors0name"))
id = Column(Integer, primary_key=True) id = sa.Column(sa.Integer, primary_key=True)
name = Column(String(255), nullable=False) name = sa.Column(sa.String(255), nullable=False)
memory_mb = Column(Integer, nullable=False) memory_mb = sa.Column(sa.Integer, nullable=False)
vcpus = Column(Integer, nullable=False) vcpus = sa.Column(sa.Integer, nullable=False)
root_gb = Column(Integer) root_gb = sa.Column(sa.Integer)
ephemeral_gb = Column(Integer) ephemeral_gb = sa.Column(sa.Integer)
flavorid = Column(String(255), nullable=False) flavorid = sa.Column(sa.String(255), nullable=False)
swap = Column(Integer, nullable=False, default=0) swap = sa.Column(sa.Integer, nullable=False, default=0)
rxtx_factor = Column(Float, default=1) rxtx_factor = sa.Column(sa.Float, default=1)
vcpu_weight = Column(Integer) vcpu_weight = sa.Column(sa.Integer)
disabled = Column(Boolean, default=False) disabled = sa.Column(sa.Boolean, default=False)
is_public = Column(Boolean, default=True) is_public = sa.Column(sa.Boolean, default=True)
description = Column(Text) description = sa.Column(sa.Text)
class FlavorExtraSpecs(API_BASE): class FlavorExtraSpecs(API_BASE):
"""Represents additional specs as key/value pairs for a flavor""" """Represents additional specs as key/value pairs for a flavor"""
__tablename__ = 'flavor_extra_specs' __tablename__ = 'flavor_extra_specs'
__table_args__ = ( __table_args__ = (
Index('flavor_extra_specs_flavor_id_key_idx', 'flavor_id', 'key'), sa.Index('flavor_extra_specs_flavor_id_key_idx', 'flavor_id', 'key'),
schema.UniqueConstraint('flavor_id', 'key', schema.UniqueConstraint('flavor_id', 'key',
name='uniq_flavor_extra_specs0flavor_id0key'), name='uniq_flavor_extra_specs0flavor_id0key'),
{'mysql_collate': 'utf8_bin'}, {'mysql_collate': 'utf8_bin'},
) )
id = Column(Integer, primary_key=True) id = sa.Column(sa.Integer, primary_key=True)
key = Column(String(255), nullable=False) key = sa.Column(sa.String(255), nullable=False)
value = Column(String(255)) value = sa.Column(sa.String(255))
flavor_id = Column(Integer, ForeignKey('flavors.id'), nullable=False) flavor_id = sa.Column(
flavor = orm.relationship(Flavors, backref='extra_specs', sa.Integer, sa.ForeignKey('flavors.id'), nullable=False)
foreign_keys=flavor_id, flavor = orm.relationship(
primaryjoin=( Flavors, backref='extra_specs',
'FlavorExtraSpecs.flavor_id == Flavors.id')) foreign_keys=flavor_id,
primaryjoin='FlavorExtraSpecs.flavor_id == Flavors.id')
class FlavorProjects(API_BASE): class FlavorProjects(API_BASE):
@ -229,13 +232,14 @@ class FlavorProjects(API_BASE):
__table_args__ = (schema.UniqueConstraint('flavor_id', 'project_id', __table_args__ = (schema.UniqueConstraint('flavor_id', 'project_id',
name='uniq_flavor_projects0flavor_id0project_id'),) name='uniq_flavor_projects0flavor_id0project_id'),)
id = Column(Integer, primary_key=True) id = sa.Column(sa.Integer, primary_key=True)
flavor_id = Column(Integer, ForeignKey('flavors.id'), nullable=False) flavor_id = sa.Column(
project_id = Column(String(255), nullable=False) sa.Integer, sa.ForeignKey('flavors.id'), nullable=False)
flavor = orm.relationship(Flavors, backref='projects', project_id = sa.Column(sa.String(255), nullable=False)
foreign_keys=flavor_id, flavor = orm.relationship(
primaryjoin=( Flavors, backref='projects',
'FlavorProjects.flavor_id == Flavors.id')) foreign_keys=flavor_id,
primaryjoin='FlavorProjects.flavor_id == Flavors.id')
class BuildRequest(API_BASE): class BuildRequest(API_BASE):
@ -243,19 +247,19 @@ class BuildRequest(API_BASE):
__tablename__ = 'build_requests' __tablename__ = 'build_requests'
__table_args__ = ( __table_args__ = (
Index('build_requests_instance_uuid_idx', 'instance_uuid'), sa.Index('build_requests_instance_uuid_idx', 'instance_uuid'),
Index('build_requests_project_id_idx', 'project_id'), sa.Index('build_requests_project_id_idx', 'project_id'),
schema.UniqueConstraint('instance_uuid', schema.UniqueConstraint(
name='uniq_build_requests0instance_uuid'), 'instance_uuid', name='uniq_build_requests0instance_uuid'),
) )
id = Column(Integer, primary_key=True) id = sa.Column(sa.Integer, primary_key=True)
# TODO(mriedem): instance_uuid should be nullable=False # TODO(mriedem): instance_uuid should be nullable=False
instance_uuid = Column(String(36)) instance_uuid = sa.Column(sa.String(36))
project_id = Column(String(255), nullable=False) project_id = sa.Column(sa.String(255), nullable=False)
instance = Column(MediumText()) instance = sa.Column(MediumText())
block_device_mappings = Column(MediumText()) block_device_mappings = sa.Column(MediumText())
tags = Column(Text()) tags = sa.Column(sa.Text())
# TODO(alaski): Drop these from the db in Ocata # TODO(alaski): Drop these from the db in Ocata
# columns_to_drop = ['request_spec_id', 'user_id', 'display_name', # columns_to_drop = ['request_spec_id', 'user_id', 'display_name',
# 'instance_metadata', 'progress', 'vm_state', 'task_state', # 'instance_metadata', 'progress', 'vm_state', 'task_state',
@ -269,19 +273,18 @@ class KeyPair(API_BASE):
"""Represents a public key pair for ssh / WinRM.""" """Represents a public key pair for ssh / WinRM."""
__tablename__ = 'key_pairs' __tablename__ = 'key_pairs'
__table_args__ = ( __table_args__ = (
schema.UniqueConstraint("user_id", "name", schema.UniqueConstraint(
name="uniq_key_pairs0user_id0name"), "user_id", "name", name="uniq_key_pairs0user_id0name"),
) )
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String(255), nullable=False) id = sa.Column(sa.Integer, primary_key=True, nullable=False)
name = sa.Column(sa.String(255), nullable=False)
user_id = Column(String(255), nullable=False) user_id = sa.Column(sa.String(255), nullable=False)
fingerprint = sa.Column(sa.String(255))
fingerprint = Column(String(255)) public_key = sa.Column(sa.Text())
public_key = Column(Text()) type = sa.Column(
type = Column(Enum('ssh', 'x509', name='keypair_types'), sa.Enum('ssh', 'x509', name='keypair_types'),
nullable=False, server_default='ssh') nullable=False, server_default='ssh')
# TODO(stephenfin): Remove this as it's now unused post-placement split # TODO(stephenfin): Remove this as it's now unused post-placement split
@ -292,8 +295,8 @@ class ResourceClass(API_BASE):
schema.UniqueConstraint("name", name="uniq_resource_classes0name"), schema.UniqueConstraint("name", name="uniq_resource_classes0name"),
) )
id = Column(Integer, primary_key=True, nullable=False) id = sa.Column(sa.Integer, primary_key=True, nullable=False)
name = Column(String(255), nullable=False) name = sa.Column(sa.String(255), nullable=False)
# TODO(stephenfin): Remove this as it's now unused post-placement split # TODO(stephenfin): Remove this as it's now unused post-placement split
@ -302,29 +305,28 @@ class ResourceProvider(API_BASE):
__tablename__ = "resource_providers" __tablename__ = "resource_providers"
__table_args__ = ( __table_args__ = (
Index('resource_providers_uuid_idx', 'uuid'), sa.Index('resource_providers_uuid_idx', 'uuid'),
schema.UniqueConstraint('uuid', schema.UniqueConstraint('uuid', name='uniq_resource_providers0uuid'),
name='uniq_resource_providers0uuid'), sa.Index('resource_providers_name_idx', 'name'),
Index('resource_providers_name_idx', 'name'), sa.Index(
Index('resource_providers_root_provider_id_idx', 'resource_providers_root_provider_id_idx', 'root_provider_id'),
'root_provider_id'), sa.Index(
Index('resource_providers_parent_provider_id_idx', 'resource_providers_parent_provider_id_idx', 'parent_provider_id'),
'parent_provider_id'), schema.UniqueConstraint(
schema.UniqueConstraint('name', 'name', name='uniq_resource_providers0name')
name='uniq_resource_providers0name')
) )
id = Column(Integer, primary_key=True, nullable=False) id = sa.Column(sa.Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False) uuid = sa.Column(sa.String(36), nullable=False)
name = Column(Unicode(200), nullable=True) name = sa.Column(sa.Unicode(200), nullable=True)
generation = Column(Integer, default=0) generation = sa.Column(sa.Integer, default=0)
# Represents the root of the "tree" that the provider belongs to # Represents the root of the "tree" that the provider belongs to
root_provider_id = Column(Integer, ForeignKey('resource_providers.id'), root_provider_id = sa.Column(
nullable=True) sa.Integer, sa.ForeignKey('resource_providers.id'), nullable=True)
# The immediate parent provider of this provider, or NULL if there is no # The immediate parent provider of this provider, or NULL if there is no
# parent. If parent_provider_id == NULL then root_provider_id == id # parent. If parent_provider_id == NULL then root_provider_id == id
parent_provider_id = Column(Integer, ForeignKey('resource_providers.id'), parent_provider_id = sa.Column(
nullable=True) sa.Integer, sa.ForeignKey('resource_providers.id'), nullable=True)
# TODO(stephenfin): Remove this as it's now unused post-placement split # TODO(stephenfin): Remove this as it's now unused post-placement split
@ -333,29 +335,34 @@ class Inventory(API_BASE):
__tablename__ = "inventories" __tablename__ = "inventories"
__table_args__ = ( __table_args__ = (
Index('inventories_resource_provider_id_idx', sa.Index(
'resource_provider_id'), 'inventories_resource_provider_id_idx', 'resource_provider_id'),
Index('inventories_resource_class_id_idx', sa.Index(
'resource_class_id'), 'inventories_resource_class_id_idx', 'resource_class_id'),
Index('inventories_resource_provider_resource_class_idx', sa.Index(
'resource_provider_id', 'resource_class_id'), 'inventories_resource_provider_resource_class_idx',
schema.UniqueConstraint('resource_provider_id', 'resource_class_id', 'resource_provider_id',
name='uniq_inventories0resource_provider_resource_class') 'resource_class_id',
),
schema.UniqueConstraint(
'resource_provider_id',
'resource_class_id',
name='uniq_inventories0resource_provider_resource_class'
),
) )
id = Column(Integer, primary_key=True, nullable=False) id = sa.Column(sa.Integer, primary_key=True, nullable=False)
resource_provider_id = Column(Integer, nullable=False) resource_provider_id = sa.Column(sa.Integer, nullable=False)
resource_class_id = Column(Integer, nullable=False) resource_class_id = sa.Column(sa.Integer, nullable=False)
total = Column(Integer, nullable=False) total = sa.Column(sa.Integer, nullable=False)
reserved = Column(Integer, nullable=False) reserved = sa.Column(sa.Integer, nullable=False)
min_unit = Column(Integer, nullable=False) min_unit = sa.Column(sa.Integer, nullable=False)
max_unit = Column(Integer, nullable=False) max_unit = sa.Column(sa.Integer, nullable=False)
step_size = Column(Integer, nullable=False) step_size = sa.Column(sa.Integer, nullable=False)
allocation_ratio = Column(Float, nullable=False) allocation_ratio = sa.Column(sa.Float, nullable=False)
resource_provider = orm.relationship( resource_provider = orm.relationship(
"ResourceProvider", "ResourceProvider",
primaryjoin=('Inventory.resource_provider_id == ' primaryjoin='Inventory.resource_provider_id == ResourceProvider.id',
'ResourceProvider.id'),
foreign_keys=resource_provider_id) foreign_keys=resource_provider_id)
@ -365,23 +372,24 @@ class Allocation(API_BASE):
__tablename__ = "allocations" __tablename__ = "allocations"
__table_args__ = ( __table_args__ = (
Index('allocations_resource_provider_class_used_idx', sa.Index(
'resource_provider_id', 'resource_class_id', 'allocations_resource_provider_class_used_idx',
'used'), 'resource_provider_id',
Index('allocations_resource_class_id_idx', 'resource_class_id',
'resource_class_id'), 'used',
Index('allocations_consumer_id_idx', 'consumer_id') ),
sa.Index('allocations_resource_class_id_idx', 'resource_class_id'),
sa.Index('allocations_consumer_id_idx', 'consumer_id')
) )
id = Column(Integer, primary_key=True, nullable=False) id = sa.Column(sa.Integer, primary_key=True, nullable=False)
resource_provider_id = Column(Integer, nullable=False) resource_provider_id = sa.Column(sa.Integer, nullable=False)
consumer_id = Column(String(36), nullable=False) consumer_id = sa.Column(sa.String(36), nullable=False)
resource_class_id = Column(Integer, nullable=False) resource_class_id = sa.Column(sa.Integer, nullable=False)
used = Column(Integer, nullable=False) used = sa.Column(sa.Integer, nullable=False)
resource_provider = orm.relationship( resource_provider = orm.relationship(
"ResourceProvider", "ResourceProvider",
primaryjoin=('Allocation.resource_provider_id == ' primaryjoin='Allocation.resource_provider_id == ResourceProvider.id',
'ResourceProvider.id'),
foreign_keys=resource_provider_id) foreign_keys=resource_provider_id)
@ -391,12 +399,13 @@ class ResourceProviderAggregate(API_BASE):
__tablename__ = 'resource_provider_aggregates' __tablename__ = 'resource_provider_aggregates'
__table_args__ = ( __table_args__ = (
Index('resource_provider_aggregates_aggregate_id_idx', sa.Index(
'aggregate_id'), 'resource_provider_aggregates_aggregate_id_idx', 'aggregate_id'),
) )
resource_provider_id = Column(Integer, primary_key=True, nullable=False) resource_provider_id = sa.Column(
aggregate_id = Column(Integer, primary_key=True, nullable=False) sa.Integer, primary_key=True, nullable=False)
aggregate_id = sa.Column(sa.Integer, primary_key=True, nullable=False)
# TODO(stephenfin): Remove this as it's now unused post-placement split # TODO(stephenfin): Remove this as it's now unused post-placement split
@ -407,33 +416,34 @@ class PlacementAggregate(API_BASE):
schema.UniqueConstraint("uuid", name="uniq_placement_aggregates0uuid"), schema.UniqueConstraint("uuid", name="uniq_placement_aggregates0uuid"),
) )
id = Column(Integer, primary_key=True, autoincrement=True) id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
uuid = Column(String(36), index=True) uuid = sa.Column(sa.String(36), index=True)
class InstanceGroupMember(API_BASE): class InstanceGroupMember(API_BASE):
"""Represents the members for an instance group.""" """Represents the members for an instance group."""
__tablename__ = 'instance_group_member' __tablename__ = 'instance_group_member'
__table_args__ = ( __table_args__ = (
Index('instance_group_member_instance_idx', 'instance_uuid'), sa.Index('instance_group_member_instance_idx', 'instance_uuid'),
) )
id = Column(Integer, primary_key=True, nullable=False) id = sa.Column(sa.Integer, primary_key=True, nullable=False)
instance_uuid = Column(String(255)) instance_uuid = sa.Column(sa.String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'), group_id = sa.Column(
nullable=False) sa.Integer, sa.ForeignKey('instance_groups.id'), nullable=False)
class InstanceGroupPolicy(API_BASE): class InstanceGroupPolicy(API_BASE):
"""Represents the policy type for an instance group.""" """Represents the policy type for an instance group."""
__tablename__ = 'instance_group_policy' __tablename__ = 'instance_group_policy'
__table_args__ = ( __table_args__ = (
Index('instance_group_policy_policy_idx', 'policy'), sa.Index('instance_group_policy_policy_idx', 'policy'),
) )
id = Column(Integer, primary_key=True, nullable=False)
policy = Column(String(255)) id = sa.Column(sa.Integer, primary_key=True, nullable=False)
group_id = Column(Integer, ForeignKey('instance_groups.id'), policy = sa.Column(sa.String(255))
nullable=False) group_id = sa.Column(
rules = Column(Text) sa.Integer, sa.ForeignKey('instance_groups.id'), nullable=False)
rules = sa.Column(sa.Text)
class InstanceGroup(API_BASE): class InstanceGroup(API_BASE):
@ -448,15 +458,17 @@ class InstanceGroup(API_BASE):
schema.UniqueConstraint('uuid', name='uniq_instance_groups0uuid'), schema.UniqueConstraint('uuid', name='uniq_instance_groups0uuid'),
) )
id = Column(Integer, primary_key=True, autoincrement=True) id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
user_id = Column(String(255)) user_id = sa.Column(sa.String(255))
project_id = Column(String(255)) project_id = sa.Column(sa.String(255))
uuid = Column(String(36), nullable=False) uuid = sa.Column(sa.String(36), nullable=False)
name = Column(String(255)) name = sa.Column(sa.String(255))
_policies = orm.relationship(InstanceGroupPolicy, _policies = orm.relationship(
primaryjoin='InstanceGroup.id == InstanceGroupPolicy.group_id') InstanceGroupPolicy,
_members = orm.relationship(InstanceGroupMember, primaryjoin='InstanceGroup.id == InstanceGroupPolicy.group_id')
primaryjoin='InstanceGroup.id == InstanceGroupMember.group_id') _members = orm.relationship(
InstanceGroupMember,
primaryjoin='InstanceGroup.id == InstanceGroupMember.group_id')
@property @property
def policy(self): def policy(self):
@ -482,41 +494,43 @@ class Quota(API_BASE):
deployment is used. If the row is present but the hard limit is deployment is used. If the row is present but the hard limit is
Null, then the resource is unlimited. Null, then the resource is unlimited.
""" """
__tablename__ = 'quotas' __tablename__ = 'quotas'
__table_args__ = ( __table_args__ = (
schema.UniqueConstraint("project_id", "resource", schema.UniqueConstraint(
name="uniq_quotas0project_id0resource" "project_id",
"resource",
name="uniq_quotas0project_id0resource"
), ),
) )
id = Column(Integer, primary_key=True)
project_id = Column(String(255)) id = sa.Column(sa.Integer, primary_key=True)
project_id = sa.Column(sa.String(255))
resource = Column(String(255), nullable=False) resource = sa.Column(sa.String(255), nullable=False)
hard_limit = Column(Integer) hard_limit = sa.Column(sa.Integer)
class ProjectUserQuota(API_BASE): class ProjectUserQuota(API_BASE):
"""Represents a single quota override for a user with in a project.""" """Represents a single quota override for a user with in a project."""
__tablename__ = 'project_user_quotas' __tablename__ = 'project_user_quotas'
uniq_name = "uniq_project_user_quotas0user_id0project_id0resource"
__table_args__ = ( __table_args__ = (
schema.UniqueConstraint("user_id", "project_id", "resource", schema.UniqueConstraint(
name=uniq_name), "user_id",
Index('project_user_quotas_project_id_idx', "project_id",
'project_id'), "resource",
Index('project_user_quotas_user_id_idx', name="uniq_project_user_quotas0user_id0project_id0resource",
'user_id',) ),
sa.Index(
'project_user_quotas_project_id_idx', 'project_id'),
sa.Index(
'project_user_quotas_user_id_idx', 'user_id',)
) )
id = Column(Integer, primary_key=True, nullable=False)
project_id = Column(String(255), nullable=False) id = sa.Column(sa.Integer, primary_key=True, nullable=False)
user_id = Column(String(255), nullable=False) project_id = sa.Column(sa.String(255), nullable=False)
user_id = sa.Column(sa.String(255), nullable=False)
resource = Column(String(255), nullable=False) resource = sa.Column(sa.String(255), nullable=False)
hard_limit = Column(Integer) hard_limit = sa.Column(sa.Integer)
class QuotaClass(API_BASE): class QuotaClass(API_BASE):
@ -529,14 +543,14 @@ class QuotaClass(API_BASE):
__tablename__ = 'quota_classes' __tablename__ = 'quota_classes'
__table_args__ = ( __table_args__ = (
Index('quota_classes_class_name_idx', 'class_name'), sa.Index('quota_classes_class_name_idx', 'class_name'),
) )
id = Column(Integer, primary_key=True) id = sa.Column(sa.Integer, primary_key=True)
class_name = Column(String(255)) class_name = sa.Column(sa.String(255))
resource = Column(String(255)) resource = sa.Column(sa.String(255))
hard_limit = Column(Integer) hard_limit = sa.Column(sa.Integer)
class QuotaUsage(API_BASE): class QuotaUsage(API_BASE):
@ -544,23 +558,22 @@ class QuotaUsage(API_BASE):
__tablename__ = 'quota_usages' __tablename__ = 'quota_usages'
__table_args__ = ( __table_args__ = (
Index('quota_usages_project_id_idx', 'project_id'), sa.Index('quota_usages_project_id_idx', 'project_id'),
Index('quota_usages_user_id_idx', 'user_id'), sa.Index('quota_usages_user_id_idx', 'user_id'),
) )
id = Column(Integer, primary_key=True)
project_id = Column(String(255)) id = sa.Column(sa.Integer, primary_key=True)
user_id = Column(String(255)) project_id = sa.Column(sa.String(255))
resource = Column(String(255), nullable=False) user_id = sa.Column(sa.String(255))
resource = sa.Column(sa.String(255), nullable=False)
in_use = Column(Integer, nullable=False) in_use = sa.Column(sa.Integer, nullable=False)
reserved = Column(Integer, nullable=False) reserved = sa.Column(sa.Integer, nullable=False)
@property @property
def total(self): def total(self):
return self.in_use + self.reserved return self.in_use + self.reserved
until_refresh = Column(Integer) until_refresh = sa.Column(sa.Integer)
class Reservation(API_BASE): class Reservation(API_BASE):
@ -568,23 +581,21 @@ class Reservation(API_BASE):
__tablename__ = 'reservations' __tablename__ = 'reservations'
__table_args__ = ( __table_args__ = (
Index('reservations_project_id_idx', 'project_id'), sa.Index('reservations_project_id_idx', 'project_id'),
Index('reservations_uuid_idx', 'uuid'), sa.Index('reservations_uuid_idx', 'uuid'),
Index('reservations_expire_idx', 'expire'), sa.Index('reservations_expire_idx', 'expire'),
Index('reservations_user_id_idx', 'user_id'), sa.Index('reservations_user_id_idx', 'user_id'),
) )
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False)
usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
project_id = Column(String(255))
user_id = Column(String(255))
resource = Column(String(255))
delta = Column(Integer, nullable=False)
expire = Column(DateTime)
id = sa.Column(sa.Integer, primary_key=True, nullable=False)
uuid = sa.Column(sa.String(36), nullable=False)
usage_id = sa.Column(
sa.Integer, sa.ForeignKey('quota_usages.id'), nullable=False)
project_id = sa.Column(sa.String(255))
user_id = sa.Column(sa.String(255))
resource = sa.Column(sa.String(255))
delta = sa.Column(sa.Integer, nullable=False)
expire = sa.Column(sa.DateTime)
usage = orm.relationship( usage = orm.relationship(
"QuotaUsage", "QuotaUsage",
foreign_keys=usage_id, foreign_keys=usage_id,
@ -599,8 +610,9 @@ class Trait(API_BASE):
schema.UniqueConstraint('name', name='uniq_traits0name'), schema.UniqueConstraint('name', name='uniq_traits0name'),
) )
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) id = sa.Column(
name = Column(Unicode(255), nullable=False) sa.Integer, primary_key=True, nullable=False, autoincrement=True)
name = sa.Column(sa.Unicode(255), nullable=False)
# TODO(stephenfin): Remove this as it's now unused post-placement split # TODO(stephenfin): Remove this as it's now unused post-placement split
@ -609,16 +621,18 @@ class ResourceProviderTrait(API_BASE):
__tablename__ = "resource_provider_traits" __tablename__ = "resource_provider_traits"
__table_args__ = ( __table_args__ = (
Index('resource_provider_traits_resource_provider_trait_idx', sa.Index('resource_provider_traits_resource_provider_trait_idx',
'resource_provider_id', 'trait_id'), 'resource_provider_id', 'trait_id'),
) )
trait_id = Column(Integer, ForeignKey('traits.id'), primary_key=True, trait_id = sa.Column(
nullable=False) sa.Integer, sa.ForeignKey('traits.id'), primary_key=True,
resource_provider_id = Column(Integer, nullable=False)
ForeignKey('resource_providers.id'), resource_provider_id = sa.Column(
primary_key=True, sa.Integer,
nullable=False) sa.ForeignKey('resource_providers.id'),
primary_key=True,
nullable=False)
# TODO(stephenfin): Remove this as it's unused # TODO(stephenfin): Remove this as it's unused
@ -633,8 +647,9 @@ class Project(API_BASE):
), ),
) )
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) id = sa.Column(
external_id = Column(String(255), nullable=False) sa.Integer, primary_key=True, nullable=False, autoincrement=True)
external_id = sa.Column(sa.String(255), nullable=False)
# TODO(stephenfin): Remove this as it's unused # TODO(stephenfin): Remove this as it's unused
@ -643,14 +658,12 @@ class User(API_BASE):
__tablename__ = 'users' __tablename__ = 'users'
__table_args__ = ( __table_args__ = (
schema.UniqueConstraint( schema.UniqueConstraint('external_id', name='uniq_users0external_id'),
'external_id',
name='uniq_users0external_id',
),
) )
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) id = sa.Column(
external_id = Column(String(255), nullable=False) sa.Integer, primary_key=True, nullable=False, autoincrement=True)
external_id = sa.Column(sa.String(255), nullable=False)
# TODO(stephenfin): Remove this as it's unused # TODO(stephenfin): Remove this as it's unused
@ -659,16 +672,22 @@ class Consumer(API_BASE):
__tablename__ = 'consumers' __tablename__ = 'consumers'
__table_args__ = ( __table_args__ = (
Index('consumers_project_id_uuid_idx', 'project_id', 'uuid'), sa.Index('consumers_project_id_uuid_idx', 'project_id', 'uuid'),
Index('consumers_project_id_user_id_uuid_idx', 'project_id', 'user_id', sa.Index(
'uuid'), 'consumers_project_id_user_id_uuid_idx',
'project_id',
'user_id',
'uuid',
),
schema.UniqueConstraint('uuid', name='uniq_consumers0uuid'), schema.UniqueConstraint('uuid', name='uniq_consumers0uuid'),
) )
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) id = sa.Column(
uuid = Column(String(36), nullable=False) sa.Integer, primary_key=True, nullable=False, autoincrement=True)
project_id = Column(Integer, nullable=False) uuid = sa.Column(sa.String(36), nullable=False)
user_id = Column(Integer, nullable=False) project_id = sa.Column(sa.Integer, nullable=False)
user_id = sa.Column(sa.Integer, nullable=False)
# FIXME(mriedem): Change this to server_default=text("0") to match the # FIXME(mriedem): Change this to server_default=text("0") to match the
# 059_add_consumer_generation script once bug 1776527 is fixed. # 059_add_consumer_generation script once bug 1776527 is fixed.
generation = Column(Integer, nullable=False, server_default="0", default=0) generation = sa.Column(
sa.Integer, nullable=False, server_default="0", default=0)

File diff suppressed because it is too large Load Diff

View File

@ -16,8 +16,7 @@ from oslo_db import exception as db_exc
from oslo_log import log as logging from oslo_log import log as logging
from oslo_utils import excutils from oslo_utils import excutils
from oslo_utils import uuidutils from oslo_utils import uuidutils
from sqlalchemy.orm import contains_eager from sqlalchemy import orm
from sqlalchemy.orm import joinedload
from nova.compute import utils as compute_utils from nova.compute import utils as compute_utils
from nova.db.sqlalchemy import api as db_api from nova.db.sqlalchemy import api as db_api
@ -36,8 +35,8 @@ DEPRECATED_FIELDS = ['deleted', 'deleted_at']
@db_api.api_context_manager.reader @db_api.api_context_manager.reader
def _aggregate_get_from_db(context, aggregate_id): def _aggregate_get_from_db(context, aggregate_id):
query = context.session.query(api_models.Aggregate).\ query = context.session.query(api_models.Aggregate).\
options(joinedload('_hosts')).\ options(orm.joinedload('_hosts')).\
options(joinedload('_metadata')) options(orm.joinedload('_metadata'))
query = query.filter(api_models.Aggregate.id == aggregate_id) query = query.filter(api_models.Aggregate.id == aggregate_id)
aggregate = query.first() aggregate = query.first()
@ -51,8 +50,8 @@ def _aggregate_get_from_db(context, aggregate_id):
@db_api.api_context_manager.reader @db_api.api_context_manager.reader
def _aggregate_get_from_db_by_uuid(context, aggregate_uuid): def _aggregate_get_from_db_by_uuid(context, aggregate_uuid):
query = context.session.query(api_models.Aggregate).\ query = context.session.query(api_models.Aggregate).\
options(joinedload('_hosts')).\ options(orm.joinedload('_hosts')).\
options(joinedload('_metadata')) options(orm.joinedload('_metadata'))
query = query.filter(api_models.Aggregate.uuid == aggregate_uuid) query = query.filter(api_models.Aggregate.uuid == aggregate_uuid)
aggregate = query.first() aggregate = query.first()
@ -415,8 +414,8 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
@db_api.api_context_manager.reader @db_api.api_context_manager.reader
def _get_all_from_db(context): def _get_all_from_db(context):
query = context.session.query(api_models.Aggregate).\ query = context.session.query(api_models.Aggregate).\
options(joinedload('_hosts')).\ options(orm.joinedload('_hosts')).\
options(joinedload('_metadata')) options(orm.joinedload('_metadata'))
return query.all() return query.all()
@ -424,8 +423,8 @@ def _get_all_from_db(context):
@db_api.api_context_manager.reader @db_api.api_context_manager.reader
def _get_by_host_from_db(context, host, key=None): def _get_by_host_from_db(context, host, key=None):
query = context.session.query(api_models.Aggregate).\ query = context.session.query(api_models.Aggregate).\
options(joinedload('_hosts')).\ options(orm.joinedload('_hosts')).\
options(joinedload('_metadata')) options(orm.joinedload('_metadata'))
query = query.join('_hosts') query = query.join('_hosts')
query = query.filter(api_models.AggregateHost.host == host) query = query.filter(api_models.AggregateHost.host == host)
@ -445,8 +444,8 @@ def _get_by_metadata_from_db(context, key=None, value=None):
query = query.filter(api_models.AggregateMetadata.key == key) query = query.filter(api_models.AggregateMetadata.key == key)
if value is not None: if value is not None:
query = query.filter(api_models.AggregateMetadata.value == value) query = query.filter(api_models.AggregateMetadata.value == value)
query = query.options(contains_eager("_metadata")) query = query.options(orm.contains_eager("_metadata"))
query = query.options(joinedload("_hosts")) query = query.options(orm.joinedload("_hosts"))
return query.all() return query.all()
@ -477,8 +476,8 @@ def _get_non_matching_by_metadata_keys_from_db(context, ignored_keys,
query = query.filter(~api_models.AggregateMetadata.key.in_( query = query.filter(~api_models.AggregateMetadata.key.in_(
ignored_keys)) ignored_keys))
query = query.options(contains_eager("_metadata")) query = query.options(orm.contains_eager("_metadata"))
query = query.options(joinedload("_hosts")) query = query.options(orm.joinedload("_hosts"))
return query.all() return query.all()

View File

@ -14,9 +14,8 @@ from urllib import parse as urlparse
from oslo_log import log as logging from oslo_log import log as logging
from oslo_utils import versionutils from oslo_utils import versionutils
from sqlalchemy.sql.expression import asc from sqlalchemy import sql
from sqlalchemy.sql import false from sqlalchemy.sql import expression
from sqlalchemy.sql import true
import nova.conf import nova.conf
from nova.db.sqlalchemy import api as db_api from nova.db.sqlalchemy import api as db_api
@ -250,7 +249,7 @@ class CellMappingList(base.ObjectListBase, base.NovaObject):
@db_api.api_context_manager.reader @db_api.api_context_manager.reader
def _get_all_from_db(context): def _get_all_from_db(context):
return context.session.query(api_models.CellMapping).order_by( return context.session.query(api_models.CellMapping).order_by(
asc(api_models.CellMapping.id)).all() expression.asc(api_models.CellMapping.id)).all()
@base.remotable_classmethod @base.remotable_classmethod
def get_all(cls, context): def get_all(cls, context):
@ -261,12 +260,13 @@ class CellMappingList(base.ObjectListBase, base.NovaObject):
@db_api.api_context_manager.reader @db_api.api_context_manager.reader
def _get_by_disabled_from_db(context, disabled): def _get_by_disabled_from_db(context, disabled):
if disabled: if disabled:
return context.session.query(api_models.CellMapping).filter_by( return context.session.query(api_models.CellMapping)\
disabled=true()).order_by(asc(api_models.CellMapping.id)).all() .filter_by(disabled=sql.true())\
.order_by(expression.asc(api_models.CellMapping.id)).all()
else: else:
return context.session.query(api_models.CellMapping).filter_by( return context.session.query(api_models.CellMapping)\
disabled=false()).order_by(asc( .filter_by(disabled=sql.false())\
api_models.CellMapping.id)).all() .order_by(expression.asc(api_models.CellMapping.id)).all()
@base.remotable_classmethod @base.remotable_classmethod
def get_by_disabled(cls, context, disabled): def get_by_disabled(cls, context, disabled):

View File

@ -12,12 +12,11 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
from oslo_serialization import jsonutils from oslo_serialization import jsonutils
from oslo_utils import uuidutils from oslo_utils import uuidutils
from oslo_utils import versionutils from oslo_utils import versionutils
from sqlalchemy import or_ import sqlalchemy as sa
from sqlalchemy.sql import null from sqlalchemy import sql
import nova.conf import nova.conf
from nova.db import api as db from nova.db import api as db
@ -500,13 +499,13 @@ def _get_node_empty_ratio(context, max_count):
Results are limited by ``max_count``. Results are limited by ``max_count``.
""" """
return context.session.query(models.ComputeNode).filter(or_( return context.session.query(models.ComputeNode).filter(sa.or_(
models.ComputeNode.ram_allocation_ratio == '0.0', models.ComputeNode.ram_allocation_ratio == '0.0',
models.ComputeNode.cpu_allocation_ratio == '0.0', models.ComputeNode.cpu_allocation_ratio == '0.0',
models.ComputeNode.disk_allocation_ratio == '0.0', models.ComputeNode.disk_allocation_ratio == '0.0',
models.ComputeNode.ram_allocation_ratio == null(), models.ComputeNode.ram_allocation_ratio == sql.null(),
models.ComputeNode.cpu_allocation_ratio == null(), models.ComputeNode.cpu_allocation_ratio == sql.null(),
models.ComputeNode.disk_allocation_ratio == null() models.ComputeNode.disk_allocation_ratio == sql.null()
)).filter(models.ComputeNode.deleted == 0).limit(max_count).all() )).filter(models.ComputeNode.deleted == 0).limit(max_count).all()

View File

@ -15,10 +15,10 @@
from oslo_db import exception as db_exc from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils from oslo_utils import versionutils
from sqlalchemy import or_ import sqlalchemy as sa
from sqlalchemy.orm import joinedload from sqlalchemy import orm
from sqlalchemy.sql.expression import asc from sqlalchemy import sql
from sqlalchemy.sql import true from sqlalchemy.sql import expression
import nova.conf import nova.conf
from nova.db.sqlalchemy import api as db_api from nova.db.sqlalchemy import api as db_api
@ -55,9 +55,9 @@ def _dict_with_extra_specs(flavor_model):
@db_api.api_context_manager.reader @db_api.api_context_manager.reader
def _get_projects_from_db(context, flavorid): def _get_projects_from_db(context, flavorid):
db_flavor = context.session.query(api_models.Flavors).\ db_flavor = context.session.query(api_models.Flavors).\
filter_by(flavorid=flavorid).\ filter_by(flavorid=flavorid).\
options(joinedload('projects')).\ options(orm.joinedload('projects')).\
first() first()
if not db_flavor: if not db_flavor:
raise exception.FlavorNotFound(flavor_id=flavorid) raise exception.FlavorNotFound(flavor_id=flavorid)
return [x['project_id'] for x in db_flavor['projects']] return [x['project_id'] for x in db_flavor['projects']]
@ -272,13 +272,13 @@ class Flavor(base.NovaPersistentObject, base.NovaObject,
@db_api.api_context_manager.reader @db_api.api_context_manager.reader
def _flavor_get_query_from_db(context): def _flavor_get_query_from_db(context):
query = context.session.query(api_models.Flavors).\ query = context.session.query(api_models.Flavors).\
options(joinedload('extra_specs')) options(orm.joinedload('extra_specs'))
if not context.is_admin: if not context.is_admin:
the_filter = [api_models.Flavors.is_public == true()] the_filter = [api_models.Flavors.is_public == sql.true()]
the_filter.extend([ the_filter.extend([
api_models.Flavors.projects.any(project_id=context.project_id) api_models.Flavors.projects.any(project_id=context.project_id)
]) ])
query = query.filter(or_(*the_filter)) query = query.filter(sa.or_(*the_filter))
return query return query
@staticmethod @staticmethod
@ -309,7 +309,7 @@ class Flavor(base.NovaPersistentObject, base.NovaObject,
"""Returns a dict describing specific flavor_id.""" """Returns a dict describing specific flavor_id."""
result = Flavor._flavor_get_query_from_db(context).\ result = Flavor._flavor_get_query_from_db(context).\
filter_by(flavorid=flavor_id).\ filter_by(flavorid=flavor_id).\
order_by(asc(api_models.Flavors.id)).\ order_by(expression.asc(api_models.Flavors.id)).\
first() first()
if not result: if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id) raise exception.FlavorNotFound(flavor_id=flavor_id)
@ -609,7 +609,7 @@ def _flavor_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
the_filter.extend([api_models.Flavors.projects.any( the_filter.extend([api_models.Flavors.projects.any(
project_id=context.project_id)]) project_id=context.project_id)])
if len(the_filter) > 1: if len(the_filter) > 1:
query = query.filter(or_(*the_filter)) query = query.filter(sa.or_(*the_filter))
else: else:
query = query.filter(the_filter[0]) query = query.filter(the_filter[0])
marker_row = None marker_row = None

View File

@ -11,7 +11,7 @@
# under the License. # under the License.
from oslo_db import exception as db_exc from oslo_db import exception as db_exc
from sqlalchemy.orm import joinedload from sqlalchemy import orm
from nova import context from nova import context
from nova.db.sqlalchemy import api as db_api from nova.db.sqlalchemy import api as db_api
@ -89,9 +89,9 @@ class HostMapping(base.NovaTimestampObject, base.NovaObject):
@staticmethod @staticmethod
@db_api.api_context_manager.reader @db_api.api_context_manager.reader
def _get_by_host_from_db(context, host): def _get_by_host_from_db(context, host):
db_mapping = (context.session.query(api_models.HostMapping) db_mapping = context.session.query(api_models.HostMapping)\
.options(joinedload('cell_mapping')) .options(orm.joinedload('cell_mapping'))\
.filter(api_models.HostMapping.host == host)).first() .filter(api_models.HostMapping.host == host).first()
if not db_mapping: if not db_mapping:
raise exception.HostMappingNotFound(name=host) raise exception.HostMappingNotFound(name=host)
return db_mapping return db_mapping
@ -160,7 +160,7 @@ class HostMappingList(base.ObjectListBase, base.NovaObject):
@db_api.api_context_manager.reader @db_api.api_context_manager.reader
def _get_from_db(context, cell_id=None): def _get_from_db(context, cell_id=None):
query = (context.session.query(api_models.HostMapping) query = (context.session.query(api_models.HostMapping)
.options(joinedload('cell_mapping'))) .options(orm.joinedload('cell_mapping')))
if cell_id: if cell_id:
query = query.filter(api_models.HostMapping.cell_id == cell_id) query = query.filter(api_models.HostMapping.cell_id == cell_id)
return query.all() return query.all()

View File

@ -20,10 +20,9 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils from oslo_serialization import jsonutils
from oslo_utils import timeutils from oslo_utils import timeutils
from oslo_utils import versionutils from oslo_utils import versionutils
from sqlalchemy import or_ import sqlalchemy as sa
from sqlalchemy.sql import false from sqlalchemy import sql
from sqlalchemy.sql import func from sqlalchemy.sql import func
from sqlalchemy.sql import null
from nova import availability_zones as avail_zone from nova import availability_zones as avail_zone
from nova.compute import task_states from nova.compute import task_states
@ -1516,9 +1515,9 @@ class InstanceList(base.ObjectListBase, base.NovaObject):
# NOTE(melwitt): Copied from nova/db/sqlalchemy/api.py: # NOTE(melwitt): Copied from nova/db/sqlalchemy/api.py:
# It would be better to have vm_state not be nullable # It would be better to have vm_state not be nullable
# but until then we test it explicitly as a workaround. # but until then we test it explicitly as a workaround.
not_soft_deleted = or_( not_soft_deleted = sa.or_(
models.Instance.vm_state != vm_states.SOFT_DELETED, models.Instance.vm_state != vm_states.SOFT_DELETED,
models.Instance.vm_state == null() models.Instance.vm_state == sql.null()
) )
project_query = context.session.query( project_query = context.session.query(
func.count(models.Instance.id), func.count(models.Instance.id),
@ -1531,8 +1530,10 @@ class InstanceList(base.ObjectListBase, base.NovaObject):
# non-hidden version of the instance in another cell database and the # non-hidden version of the instance in another cell database and the
# API will only show one of them, so we don't count the hidden copy. # API will only show one of them, so we don't count the hidden copy.
project_query = project_query.filter( project_query = project_query.filter(
or_(models.Instance.hidden == false(), sa.or_(
models.Instance.hidden == null())) models.Instance.hidden == sql.false(),
models.Instance.hidden == sql.null(),
))
project_result = project_query.first() project_result = project_query.first()
fields = ('instances', 'cores', 'ram') fields = ('instances', 'cores', 'ram')

View File

@ -19,8 +19,7 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils from oslo_serialization import jsonutils
from oslo_utils import uuidutils from oslo_utils import uuidutils
from oslo_utils import versionutils from oslo_utils import versionutils
from sqlalchemy.orm import contains_eager from sqlalchemy import orm
from sqlalchemy.orm import joinedload
from nova.compute import utils as compute_utils from nova.compute import utils as compute_utils
from nova.db.sqlalchemy import api as db_api from nova.db.sqlalchemy import api as db_api
@ -37,8 +36,8 @@ LOG = logging.getLogger(__name__)
def _instance_group_get_query(context, id_field=None, id=None): def _instance_group_get_query(context, id_field=None, id=None):
query = context.session.query(api_models.InstanceGroup).\ query = context.session.query(api_models.InstanceGroup).\
options(joinedload('_policies')).\ options(orm.joinedload('_policies')).\
options(joinedload('_members')) options(orm.joinedload('_members'))
if not context.is_admin: if not context.is_admin:
query = query.filter_by(project_id=context.project_id) query = query.filter_by(project_id=context.project_id)
if id and id_field: if id and id_field:
@ -89,7 +88,7 @@ def _instance_group_members_add_by_uuid(context, group_uuid, members):
outerjoin(api_models.InstanceGroupMember, outerjoin(api_models.InstanceGroupMember,
api_models.InstanceGroupMember.instance_uuid.in_(set(members))).\ api_models.InstanceGroupMember.instance_uuid.in_(set(members))).\
filter(api_models.InstanceGroup.uuid == group_uuid).\ filter(api_models.InstanceGroup.uuid == group_uuid).\
options(contains_eager('_members')).first() options(orm.contains_eager('_members')).first()
if not group: if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid) raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return _instance_group_model_add(context, api_models.InstanceGroupMember, return _instance_group_model_add(context, api_models.InstanceGroupMember,

View File

@ -14,11 +14,10 @@ import collections
from oslo_log import log as logging from oslo_log import log as logging
from oslo_utils import versionutils from oslo_utils import versionutils
from sqlalchemy import orm
from sqlalchemy.orm import exc as orm_exc from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import joinedload from sqlalchemy import sql
from sqlalchemy.sql import false
from sqlalchemy.sql import func from sqlalchemy.sql import func
from sqlalchemy.sql import or_
from nova import context as nova_context from nova import context as nova_context
from nova.db.sqlalchemy import api as db_api from nova.db.sqlalchemy import api as db_api
@ -99,11 +98,10 @@ class InstanceMapping(base.NovaTimestampObject, base.NovaObject):
@staticmethod @staticmethod
@db_api.api_context_manager.reader @db_api.api_context_manager.reader
def _get_by_instance_uuid_from_db(context, instance_uuid): def _get_by_instance_uuid_from_db(context, instance_uuid):
db_mapping = (context.session.query(api_models.InstanceMapping) db_mapping = context.session.query(api_models.InstanceMapping)\
.options(joinedload('cell_mapping')) .options(orm.joinedload('cell_mapping'))\
.filter( .filter(api_models.InstanceMapping.instance_uuid == instance_uuid)\
api_models.InstanceMapping.instance_uuid == .first()
instance_uuid)).first()
if not db_mapping: if not db_mapping:
raise exception.InstanceMappingNotFound(uuid=instance_uuid) raise exception.InstanceMappingNotFound(uuid=instance_uuid)
@ -313,10 +311,9 @@ class InstanceMappingList(base.ObjectListBase, base.NovaObject):
@staticmethod @staticmethod
@db_api.api_context_manager.reader @db_api.api_context_manager.reader
def _get_by_project_id_from_db(context, project_id): def _get_by_project_id_from_db(context, project_id):
return (context.session.query(api_models.InstanceMapping) return context.session.query(api_models.InstanceMapping)\
.options(joinedload('cell_mapping')) .options(orm.joinedload('cell_mapping'))\
.filter( .filter(api_models.InstanceMapping.project_id == project_id).all()
api_models.InstanceMapping.project_id == project_id)).all()
@base.remotable_classmethod @base.remotable_classmethod
def get_by_project_id(cls, context, project_id): def get_by_project_id(cls, context, project_id):
@ -328,9 +325,9 @@ class InstanceMappingList(base.ObjectListBase, base.NovaObject):
@staticmethod @staticmethod
@db_api.api_context_manager.reader @db_api.api_context_manager.reader
def _get_by_cell_id_from_db(context, cell_id): def _get_by_cell_id_from_db(context, cell_id):
return (context.session.query(api_models.InstanceMapping) return context.session.query(api_models.InstanceMapping)\
.options(joinedload('cell_mapping')) .options(orm.joinedload('cell_mapping'))\
.filter(api_models.InstanceMapping.cell_id == cell_id)).all() .filter(api_models.InstanceMapping.cell_id == cell_id).all()
@base.remotable_classmethod @base.remotable_classmethod
def get_by_cell_id(cls, context, cell_id): def get_by_cell_id(cls, context, cell_id):
@ -341,10 +338,10 @@ class InstanceMappingList(base.ObjectListBase, base.NovaObject):
@staticmethod @staticmethod
@db_api.api_context_manager.reader @db_api.api_context_manager.reader
def _get_by_instance_uuids_from_db(context, uuids): def _get_by_instance_uuids_from_db(context, uuids):
return (context.session.query(api_models.InstanceMapping) return context.session.query(api_models.InstanceMapping)\
.options(joinedload('cell_mapping')) .options(orm.joinedload('cell_mapping'))\
.filter(api_models.InstanceMapping.instance_uuid.in_(uuids)) .filter(api_models.InstanceMapping.instance_uuid.in_(uuids))\
.all()) .all()
@base.remotable_classmethod @base.remotable_classmethod
def get_by_instance_uuids(cls, context, uuids): def get_by_instance_uuids(cls, context, uuids):
@ -376,11 +373,11 @@ class InstanceMappingList(base.ObjectListBase, base.NovaObject):
# queued_for_delete was not run) and False (cases when the online # queued_for_delete was not run) and False (cases when the online
# data migration for queued_for_delete was run) are assumed to mean # data migration for queued_for_delete was run) are assumed to mean
# that the instance is not queued for deletion. # that the instance is not queued for deletion.
query = (query.filter(or_( query = (query.filter(sql.or_(
api_models.InstanceMapping.queued_for_delete == false(), api_models.InstanceMapping.queued_for_delete == sql.false(),
api_models.InstanceMapping.queued_for_delete.is_(None))) api_models.InstanceMapping.queued_for_delete.is_(None)))
.join('cell_mapping') .join('cell_mapping')
.options(joinedload('cell_mapping')) .options(orm.joinedload('cell_mapping'))
.filter(api_models.CellMapping.uuid == cell_uuid)) .filter(api_models.CellMapping.uuid == cell_uuid))
if limit is not None: if limit is not None:
query = query.limit(limit) query = query.limit(limit)

View File

@ -20,10 +20,7 @@ import copy
from oslo_log import log as logging from oslo_log import log as logging
from oslo_utils import importutils from oslo_utils import importutils
from sqlalchemy.sql import and_ from sqlalchemy import sql
from sqlalchemy.sql import false
from sqlalchemy.sql import null
from sqlalchemy.sql import or_
import nova.conf import nova.conf
from nova import context as nova_context from nova import context as nova_context
@ -1077,13 +1074,13 @@ def _user_id_queued_for_delete_populated(context, project_id=None):
:returns: True if user_id is set for all non-deleted instances and :returns: True if user_id is set for all non-deleted instances and
queued_for_delete is set for all instances, else False queued_for_delete is set for all instances, else False
""" """
user_id_not_populated = and_( user_id_not_populated = sql.and_(
api_models.InstanceMapping.user_id == null(), api_models.InstanceMapping.user_id == sql.null(),
api_models.InstanceMapping.queued_for_delete == false()) api_models.InstanceMapping.queued_for_delete == sql.false())
# If either queued_for_delete or user_id are unmigrated, we will return # If either queued_for_delete or user_id are unmigrated, we will return
# False. # False.
unmigrated_filter = or_( unmigrated_filter = sql.or_(
api_models.InstanceMapping.queued_for_delete == null(), api_models.InstanceMapping.queued_for_delete == sql.null(),
user_id_not_populated) user_id_not_populated)
query = context.session.query(api_models.InstanceMapping).filter( query = context.session.query(api_models.InstanceMapping).filter(
unmigrated_filter) unmigrated_filter)

View File

@ -18,9 +18,8 @@ import re
from dateutil import parser as dateutil_parser from dateutil import parser as dateutil_parser
from oslo_utils import fixture as osloutils_fixture from oslo_utils import fixture as osloutils_fixture
from oslo_utils import timeutils from oslo_utils import timeutils
import sqlalchemy as sa
from sqlalchemy import func from sqlalchemy import func
from sqlalchemy import MetaData
from sqlalchemy import select
from nova import context from nova import context
from nova.db import api as db from nova.db import api as db
@ -179,13 +178,13 @@ class TestDatabaseArchive(integrated_helpers._IntegratedTestBase):
def _get_table_counts(self): def _get_table_counts(self):
engine = sqlalchemy_api.get_engine() engine = sqlalchemy_api.get_engine()
conn = engine.connect() conn = engine.connect()
meta = MetaData(engine) meta = sa.MetaData(engine)
meta.reflect() meta.reflect()
shadow_tables = sqlalchemy_api._purgeable_tables(meta) shadow_tables = sqlalchemy_api._purgeable_tables(meta)
results = {} results = {}
for table in shadow_tables: for table in shadow_tables:
r = conn.execute( r = conn.execute(
select([func.count()]).select_from(table)).fetchone() sa.select([func.count()]).select_from(table)).fetchone()
results[table.name] = r[0] results[table.name] = r[0]
return results return results

View File

@ -36,16 +36,12 @@ from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils from oslo_utils import timeutils
from oslo_utils import uuidutils from oslo_utils import uuidutils
from sqlalchemy import Column import sqlalchemy as sa
from sqlalchemy.exc import OperationalError from sqlalchemy import exc as sqla_exc
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy import inspect from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy.orm import query from sqlalchemy.orm import query
from sqlalchemy.orm import session as sqla_session from sqlalchemy.orm import session as sqla_session
from sqlalchemy import sql from sqlalchemy import sql
from sqlalchemy import Table
from nova import block_device from nova import block_device
from nova.compute import rpcapi as compute_rpcapi from nova.compute import rpcapi as compute_rpcapi
@ -173,7 +169,7 @@ class DbTestCase(test.TestCase):
class HelperTestCase(test.TestCase): class HelperTestCase(test.TestCase):
@mock.patch.object(sqlalchemy_api, 'joinedload') @mock.patch('sqlalchemy.orm.joinedload')
def test_joinedload_helper(self, mock_jl): def test_joinedload_helper(self, mock_jl):
query = sqlalchemy_api._joinedload_all('foo.bar.baz') query = sqlalchemy_api._joinedload_all('foo.bar.baz')
@ -190,7 +186,7 @@ class HelperTestCase(test.TestCase):
self.assertEqual(column3.joinedload.return_value, query) self.assertEqual(column3.joinedload.return_value, query)
@mock.patch.object(sqlalchemy_api, 'joinedload') @mock.patch('sqlalchemy.orm.joinedload')
def test_joinedload_helper_single(self, mock_jl): def test_joinedload_helper_single(self, mock_jl):
query = sqlalchemy_api._joinedload_all('foo') query = sqlalchemy_api._joinedload_all('foo')
@ -1757,8 +1753,8 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
instances = db.instance_get_all_by_filters_sort(self.ctxt, filters) instances = db.instance_get_all_by_filters_sort(self.ctxt, filters)
self.assertEqual([], instances) self.assertEqual([], instances)
@mock.patch('nova.db.sqlalchemy.api.undefer') @mock.patch('sqlalchemy.orm.undefer')
@mock.patch('nova.db.sqlalchemy.api.joinedload') @mock.patch('sqlalchemy.orm.joinedload')
def test_instance_get_all_by_filters_extra_columns(self, def test_instance_get_all_by_filters_extra_columns(self,
mock_joinedload, mock_joinedload,
mock_undefer): mock_undefer):
@ -1768,8 +1764,8 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
mock_joinedload.assert_called_once_with('info_cache') mock_joinedload.assert_called_once_with('info_cache')
mock_undefer.assert_called_once_with('extra.pci_requests') mock_undefer.assert_called_once_with('extra.pci_requests')
@mock.patch('nova.db.sqlalchemy.api.undefer') @mock.patch('sqlalchemy.orm.undefer')
@mock.patch('nova.db.sqlalchemy.api.joinedload') @mock.patch('sqlalchemy.orm.joinedload')
def test_instance_get_active_by_window_extra_columns(self, def test_instance_get_active_by_window_extra_columns(self,
mock_joinedload, mock_joinedload,
mock_undefer): mock_undefer):
@ -2942,8 +2938,9 @@ class InstanceExtraTestCase(test.TestCase):
self.ctxt, self.instance['uuid'], self.ctxt, self.instance['uuid'],
columns=['numa_topology', 'vcpu_model', 'trusted_certs', columns=['numa_topology', 'vcpu_model', 'trusted_certs',
'resources']) 'resources'])
self.assertRaises(SQLAlchemyError, self.assertRaises(
extra.__getitem__, 'pci_requests') sqla_exc.SQLAlchemyError,
extra.__getitem__, 'pci_requests')
self.assertIn('numa_topology', extra) self.assertIn('numa_topology', extra)
self.assertIn('vcpu_model', extra) self.assertIn('vcpu_model', extra)
self.assertIn('trusted_certs', extra) self.assertIn('trusted_certs', extra)
@ -5899,7 +5896,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self): def setUp(self):
super(ArchiveTestCase, self).setUp() super(ArchiveTestCase, self).setUp()
self.engine = get_engine() self.engine = get_engine()
self.metadata = MetaData(self.engine) self.metadata = sa.MetaData(self.engine)
self.conn = self.engine.connect() self.conn = self.engine.connect()
self.instance_id_mappings = models.InstanceIdMapping.__table__ self.instance_id_mappings = models.InstanceIdMapping.__table__
self.shadow_instance_id_mappings = sqlalchemyutils.get_table( self.shadow_instance_id_mappings = sqlalchemyutils.get_table(
@ -5931,7 +5928,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
except for specificially named exceptions, are empty. This except for specificially named exceptions, are empty. This
makes sure that archiving isn't moving unexpected content. makes sure that archiving isn't moving unexpected content.
""" """
metadata = MetaData(bind=self.engine) metadata = sa.MetaData(bind=self.engine)
metadata.reflect() metadata.reflect()
for table in metadata.tables: for table in metadata.tables:
if table.startswith("shadow_") and table not in exceptions: if table.startswith("shadow_") and table not in exceptions:
@ -5943,7 +5940,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
Shadow tables should have an identical schema to the main table. Shadow tables should have an identical schema to the main table.
""" """
metadata = MetaData(bind=self.engine) metadata = sa.MetaData(bind=self.engine)
metadata.reflect() metadata.reflect()
for table_name in metadata.tables: for table_name in metadata.tables:
# some tables don't have shadow tables so skip these # some tables don't have shadow tables so skip these
@ -5961,8 +5958,8 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
shadow_table_name = f'shadow_{table_name}' shadow_table_name = f'shadow_{table_name}'
table = Table(table_name, metadata, autoload=True) table = sa.Table(table_name, metadata, autoload=True)
shadow_table = Table(shadow_table_name, metadata, autoload=True) shadow_table = sa.Table(shadow_table_name, metadata, autoload=True)
columns = {c.name: c for c in table.columns} columns = {c.name: c for c in table.columns}
shadow_columns = {c.name: c for c in shadow_table.columns} shadow_columns = {c.name: c for c in shadow_table.columns}
@ -6159,7 +6156,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
ins_stmt = main_table.insert().values(uuid=uuidstr) ins_stmt = main_table.insert().values(uuid=uuidstr)
try: try:
self.conn.execute(ins_stmt) self.conn.execute(ins_stmt)
except (db_exc.DBError, OperationalError): except (db_exc.DBError, sqla_exc.OperationalError):
# This table has constraints that require a table-specific # This table has constraints that require a table-specific
# insert, so skip it. # insert, so skip it.
return 2 return 2
@ -6640,12 +6637,12 @@ class TestSqlalchemyTypesRepr(
super(TestSqlalchemyTypesRepr, self).setUp() super(TestSqlalchemyTypesRepr, self).setUp()
self.engine = enginefacade.writer.get_engine() self.engine = enginefacade.writer.get_engine()
meta = MetaData(bind=self.engine) meta = sa.MetaData(bind=self.engine)
self.table = Table( self.table = sa.Table(
'cidr_tbl', 'cidr_tbl',
meta, meta,
Column('id', Integer, primary_key=True), sa.Column('id', sa.Integer, primary_key=True),
Column('addr', col_types.CIDR()) sa.Column('addr', col_types.CIDR())
) )
self.table.create() self.table.create()
self.addCleanup(meta.drop_all) self.addCleanup(meta.drop_all)