Merge "Provide creating real unique constraints for columns"

Jenkins 2013-01-24 00:54:25 +00:00 committed by Gerrit Code Review
commit 2e4154e120
6 changed files with 409 additions and 79 deletions

View File

@@ -406,6 +406,8 @@ class XMLDictSerializer(DictSerializer):
                 if k in attrs:
                     result.setAttribute(k, str(v))
                 else:
+                    if k == "deleted":
+                        v = str(bool(v))
                     node = self._to_xml_node(doc, metadata, k, v)
                     result.appendChild(node)
         else:
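
Here the serializer hides the schema change from API consumers: the database now stores `deleted` as an integer (0 for live rows, the row's own id once soft-deleted), but XML responses keep presenting it as a boolean string. A minimal stdlib-only sketch of that presentation rule (names are illustrative, not Nova's):

    from xml.dom.minidom import Document

    def deleted_node(doc, deleted_value):
        # 0 (or "") means "live"; a non-zero row id means "soft-deleted"
        node = doc.createElement("deleted")
        node.appendChild(doc.createTextNode(str(bool(deleted_value))))
        return node

    doc = Document()
    print(deleted_node(doc, 0).toxml())   # <deleted>False</deleted>
    print(deleted_node(doc, 42).toxml())  # <deleted>True</deleted>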

View File

@@ -172,27 +172,43 @@ def model_query(context, model, *args, **kwargs):
     :param project_only: if present and context is user-type, then restrict
             query to match the context's project_id. If set to 'allow_none',
             restriction includes project_id = None.
+    :param base_model: Where model_query is passed a "model" parameter which is
+            not a subclass of NovaBase, we should pass an extra base_model
+            parameter that is a subclass of NovaBase and corresponds to the
+            model parameter.
     """
     session = kwargs.get('session') or get_session()
     read_deleted = kwargs.get('read_deleted') or context.read_deleted
     project_only = kwargs.get('project_only', False)
 
+    def issubclassof_nova_base(obj):
+        return isinstance(obj, type) and issubclass(obj, models.NovaBase)
+
+    base_model = model
+    if not issubclassof_nova_base(base_model):
+        base_model = kwargs.get('base_model', None)
+        if not issubclassof_nova_base(base_model):
+            raise Exception(_("model or base_model parameter should be "
+                              "subclass of NovaBase"))
+
     query = session.query(model, *args)
 
+    default_deleted_value = base_model.__mapper__.c.deleted.default.arg
     if read_deleted == 'no':
-        query = query.filter_by(deleted=False)
+        query = query.filter(base_model.deleted == default_deleted_value)
     elif read_deleted == 'yes':
         pass  # omit the filter to include deleted and active
     elif read_deleted == 'only':
-        query = query.filter_by(deleted=True)
+        query = query.filter(base_model.deleted != default_deleted_value)
     else:
-        raise Exception(
-                _("Unrecognized read_deleted value '%s'") % read_deleted)
+        raise Exception(_("Unrecognized read_deleted value '%s'")
+                            % read_deleted)
 
     if is_user_context(context) and project_only:
         if project_only == 'allow_none':
-            query = query.filter(or_(model.project_id == context.project_id,
-                                     model.project_id == None))
+            query = query.\
+                filter(or_(base_model.project_id == context.project_id,
+                           base_model.project_id == None))
         else:
             query = query.filter_by(project_id=context.project_id)
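
The guard above matters for call sites that select columns rather than whole models: a Column attribute is not a NovaBase subclass, so the soft-delete filter and its default value cannot be derived from it, and such callers must name the owning model via base_model. A standalone sketch of the check with stub classes (the stubs are illustrative, not Nova's code):

    class NovaBase(object):
        pass

    class Instance(NovaBase):
        host = "column-stub"  # stands in for a SQLAlchemy Column attribute

    def issubclassof_nova_base(obj):
        # True only for the mapped class itself, never for a column
        return isinstance(obj, type) and issubclass(obj, NovaBase)

    print(issubclassof_nova_base(Instance))       # True
    print(issubclassof_nova_base(Instance.host))  # False -> need base_model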
@@ -408,7 +424,8 @@ def service_get_all_compute_sorted(context):
         label = 'instance_cores'
         subq = model_query(context, models.Instance.host,
                            func.sum(models.Instance.vcpus).label(label),
-                           session=session, read_deleted="no").\
+                           base_model=models.Instance, session=session,
+                           read_deleted="no").\
                            group_by(models.Instance.host).\
                            subquery()
         return _service_get_all_topic_subquery(context,
@@ -540,7 +557,7 @@ def _update_stats(context, new_stats, compute_id, session, prune_stats=False):
         # prune un-touched old stats:
         for stat in statmap.values():
             session.add(stat)
-            stat.update({'deleted': True})
+            stat.soft_delete(session=session)
 
     # add new and updated stats
     for stat in stats:
@@ -563,10 +580,9 @@ def compute_node_update(context, compute_id, values, prune_stats=False):
 
 
 def compute_node_get_by_host(context, host):
     """Get all capacity entries for the given host."""
-    result = model_query(context, models.ComputeNode).\
+    result = model_query(context, models.ComputeNode, read_deleted="no").\
                     join('service').\
                     filter(models.Service.host == host).\
-                    filter_by(deleted=False).\
                     first()
     return result
@@ -586,6 +602,7 @@ def compute_node_statistics(context):
                          func.sum(models.ComputeNode.current_workload),
                          func.sum(models.ComputeNode.running_vms),
                          func.sum(models.ComputeNode.disk_available_least),
+                         base_model=models.ComputeNode,
                          read_deleted="no").first()
 
     # Build a dict of the info--making no assumptions about result
@@ -660,7 +677,8 @@ def floating_ip_get(context, id):
 
 @require_context
 def floating_ip_get_pools(context):
     pools = []
-    for result in model_query(context, models.FloatingIp.pool).distinct():
+    for result in model_query(context, models.FloatingIp.pool,
+                              base_model=models.FloatingIp).distinct():
         pools.append({'name': result[0]})
     return pools
@@ -1094,30 +1112,31 @@ def fixed_ip_disassociate_all_by_timeout(context, host, time):
     # host; i.e. the network host or the instance
     # host matches. Two queries necessary because
     # join with update doesn't work.
-    host_filter = or_(and_(models.Instance.host == host,
-                           models.Network.multi_host == True),
-                      models.Network.host == host)
-    result = session.query(models.FixedIp.id).\
-                    filter(models.FixedIp.deleted == False).\
-                    filter(models.FixedIp.allocated == False).\
-                    filter(models.FixedIp.updated_at < time).\
-                    join((models.Network,
-                          models.Network.id == models.FixedIp.network_id)).\
-                    join((models.Instance,
-                          models.Instance.uuid ==
-                          models.FixedIp.instance_uuid)).\
-                    filter(host_filter).\
-                    all()
-    fixed_ip_ids = [fip[0] for fip in result]
-    if not fixed_ip_ids:
-        return 0
-    result = model_query(context, models.FixedIp, session=session).\
-                    filter(models.FixedIp.id.in_(fixed_ip_ids)).\
-                    update({'instance_uuid': None,
-                            'leased': False,
-                            'updated_at': timeutils.utcnow()},
-                           synchronize_session='fetch')
-    return result
+    with session.begin():
+        host_filter = or_(and_(models.Instance.host == host,
+                               models.Network.multi_host == True),
+                          models.Network.host == host)
+        result = model_query(context, models.FixedIp.id,
+                             base_model=models.FixedIp, read_deleted="no",
+                             session=session).\
+            filter(models.FixedIp.allocated == False).\
+            filter(models.FixedIp.updated_at < time).\
+            join((models.Network,
+                  models.Network.id == models.FixedIp.network_id)).\
+            join((models.Instance,
+                  models.Instance.uuid == models.FixedIp.instance_uuid)).\
+            filter(host_filter).\
+            all()
+        fixed_ip_ids = [fip[0] for fip in result]
+        if not fixed_ip_ids:
+            return 0
+        result = model_query(context, models.FixedIp, session=session).\
+            filter(models.FixedIp.id.in_(fixed_ip_ids)).\
+            update({'instance_uuid': None,
+                    'leased': False,
+                    'updated_at': timeutils.utcnow()},
+                   synchronize_session='fetch')
+        return result
 
 
 @require_context
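
The NOTE in this hunk names the constraint that shapes the code: SQLAlchemy cannot combine a JOIN with an UPDATE, so the function first collects matching ids, then issues a plain UPDATE ... WHERE id IN (...). A self-contained sketch of that two-step pattern against an in-memory SQLite database (toy schema, not Nova's; assumes the SQLAlchemy 0.7/1.x API in use at the time):

    from sqlalchemy import create_engine, Column, Integer, Boolean
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class FixedIp(Base):
        __tablename__ = 'fixed_ips'
        id = Column(Integer, primary_key=True)
        allocated = Column(Boolean, default=False)
        leased = Column(Boolean, default=False)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add_all([FixedIp(leased=True), FixedIp(leased=True)])
    session.commit()

    # Step 1: gather ids with whatever filters/joins are needed.
    ids = [row[0] for row in session.query(FixedIp.id).all()]
    # Step 2: one bulk UPDATE keyed on those ids, no join required.
    session.query(FixedIp).filter(FixedIp.id.in_(ids)).update(
        {'leased': False}, synchronize_session='fetch')
    session.commit()
    print(session.query(FixedIp).filter_by(leased=False).count())  # 2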
@@ -1468,7 +1487,7 @@ def instance_data_get_for_project(context, project_id, session=None):
                        func.count(models.Instance.id),
                        func.sum(models.Instance.vcpus),
                        func.sum(models.Instance.memory_mb),
-                       read_deleted="no",
+                       base_model=models.Instance,
                        session=session).\
                    filter_by(project_id=project_id).\
                    first()
@@ -1593,12 +1612,12 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
         # Instances can be soft or hard deleted and the query needs to
         # include or exclude both
         if filters.pop('deleted'):
-            deleted = or_(models.Instance.deleted == True,
+            deleted = or_(models.Instance.deleted == models.Instance.id,
                           models.Instance.vm_state == vm_states.SOFT_DELETED)
             query_prefix = query_prefix.filter(deleted)
         else:
             query_prefix = query_prefix.\
-                    filter_by(deleted=False).\
+                    filter_by(deleted=0).\
                     filter(models.Instance.vm_state != vm_states.SOFT_DELETED)
 
     if not context.is_admin:
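
This filter pair is the heart of what the commit title promises: a soft-deleted row stores its own id in deleted, while live rows store 0, so a composite unique constraint over (column, deleted) can now coexist with soft deletion — every deleted duplicate carries a distinct deleted value, but at most one live row can hold a given column value. A stdlib-only illustration (toy table, not Nova's schema):

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE ips (id INTEGER PRIMARY KEY, address TEXT, '
                 'deleted INTEGER DEFAULT 0, UNIQUE (address, deleted))')
    conn.execute("INSERT INTO ips (id, address) VALUES (1, '10.0.0.1')")
    # Soft-delete row 1: deleted := id, freeing the (address, 0) slot.
    conn.execute('UPDATE ips SET deleted = id WHERE id = 1')
    conn.execute("INSERT INTO ips (id, address) VALUES (2, '10.0.0.1')")  # ok
    try:
        conn.execute("INSERT INTO ips (id, address) VALUES (3, '10.0.0.1')")
    except sqlite3.IntegrityError as e:
        print('second live row rejected:', e)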
@@ -2122,19 +2141,21 @@ def network_create_safe(context, values):
 def network_delete_safe(context, network_id):
     session = get_session()
     with session.begin():
-        result = session.query(models.FixedIp).\
+        result = model_query(context, models.FixedIp, session=session,
+                             read_deleted="no").\
                  filter_by(network_id=network_id).\
-                 filter_by(deleted=False).\
                  filter_by(allocated=True).\
                  count()
         if result != 0:
             raise exception.NetworkInUse(network_id=network_id)
         network_ref = network_get(context, network_id=network_id,
                                   session=session)
-        session.query(models.FixedIp).\
+        model_query(context, models.FixedIp, session=session,
+                    read_deleted="no").\
                 filter_by(network_id=network_id).\
-                filter_by(deleted=False).\
                 soft_delete()
         session.delete(network_ref)
@@ -2213,9 +2234,9 @@ def network_get_associated_fixed_ips(context, network_id, host=None):
     # without regenerating the whole list
     vif_and = and_(models.VirtualInterface.id ==
                    models.FixedIp.virtual_interface_id,
-                   models.VirtualInterface.deleted == False)
+                   models.VirtualInterface.deleted == 0)
     inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
-                    models.Instance.deleted == False)
+                    models.Instance.deleted == 0)
     session = get_session()
     query = session.query(models.FixedIp.address,
                           models.FixedIp.instance_uuid,
@@ -2225,7 +2246,7 @@ def network_get_associated_fixed_ips(context, network_id, host=None):
                           models.Instance.hostname,
                           models.Instance.updated_at,
                           models.Instance.created_at).\
-                    filter(models.FixedIp.deleted == False).\
+                    filter(models.FixedIp.deleted == 0).\
                     filter(models.FixedIp.network_id == network_id).\
                     filter(models.FixedIp.allocated == True).\
                     join((models.VirtualInterface, vif_and)).\
@@ -2326,6 +2347,7 @@ def network_get_all_by_host(context, host):
         fixed_host_filter = or_(models.FixedIp.host == host,
                                 models.Instance.host == host)
         fixed_ip_query = model_query(context, models.FixedIp.network_id,
+                                     base_model=models.FixedIp,
                                      session=session).\
                          outerjoin((models.VirtualInterface,
                                     models.VirtualInterface.id ==
@@ -3138,13 +3160,14 @@ def security_group_in_use(context, group_id):
     with session.begin():
         # Are there any instances that haven't been deleted
         # that include this group?
-        inst_assoc = session.query(models.SecurityGroupInstanceAssociation).\
-                filter_by(security_group_id=group_id).\
-                filter_by(deleted=False).\
-                all()
+        inst_assoc = model_query(context,
+                                 models.SecurityGroupInstanceAssociation,
+                                 read_deleted="no", session=session).\
+                filter_by(security_group_id=group_id).\
+                all()
         for ia in inst_assoc:
-            num_instances = session.query(models.Instance).\
-                    filter_by(deleted=False).\
+            num_instances = model_query(context, models.Instance,
+                                        session=session, read_deleted="no").\
                     filter_by(uuid=ia.instance_uuid).\
                     count()
             if num_instances:
@@ -3595,7 +3618,7 @@ def instance_type_get_all(context, inactive=False, filters=None):
             if filters['is_public'] and context.project_id is not None:
                 the_filter.extend([
                     models.InstanceTypes.projects.any(
-                        project_id=context.project_id, deleted=False)
+                        project_id=context.project_id, deleted=0)
                 ])
             if len(the_filter) > 1:
                 query = query.filter(or_(*the_filter))
@@ -4037,7 +4060,8 @@ def _instance_type_extra_specs_get_query(context, flavor_id,
                                          session=None):
     # Two queries necessary because join with update doesn't work.
     t = model_query(context, models.InstanceTypes.id,
-                    session=session, read_deleted="no").\
+                    base_model=models.InstanceTypes, session=session,
+                    read_deleted="no").\
         filter(models.InstanceTypes.flavorid == flavor_id).\
         subquery()
     return model_query(context, models.InstanceTypeExtraSpecs,
@@ -4091,6 +4115,7 @@ def instance_type_extra_specs_update_or_create(context, flavor_id, specs):
     session = get_session()
     with session.begin():
         instance_type_id = model_query(context, models.InstanceTypes.id,
+                                       base_model=models.InstanceTypes,
                                        session=session, read_deleted="no").\
             filter(models.InstanceTypes.flavorid == flavor_id).\
             first()

View File

@@ -0,0 +1,226 @@
+from sqlalchemy import CheckConstraint
+from sqlalchemy.engine import reflection
+from sqlalchemy.ext.compiler import compiles
+from sqlalchemy import MetaData, Table, Column, Index
+from sqlalchemy import select
+from sqlalchemy.sql.expression import UpdateBase
+from sqlalchemy.sql import literal_column
+from sqlalchemy import String, Integer, Boolean
+from sqlalchemy.types import NullType, BigInteger
+
+
+all_tables = ['services', 'compute_nodes', 'compute_node_stats',
+              'certificates', 'instances', 'instance_info_caches',
+              'instance_types', 'volumes', 'quotas', 'quota_classes',
+              'quota_usages', 'reservations', 'snapshots',
+              'block_device_mapping', 'iscsi_targets',
+              'security_group_instance_association', 'security_groups',
+              'security_group_rules', 'provider_fw_rules', 'key_pairs',
+              'migrations', 'networks', 'virtual_interfaces', 'fixed_ips',
+              'floating_ips', 'console_pools', 'consoles',
+              'instance_metadata', 'instance_system_metadata',
+              'instance_type_projects', 'instance_type_extra_specs',
+              'aggregate_hosts', 'aggregate_metadata', 'aggregates',
+              'agent_builds', 's3_images',
+              'instance_faults',
+              'bw_usage_cache', 'volume_id_mappings', 'snapshot_id_mappings',
+              'instance_id_mappings', 'volume_usage_cache', 'task_log',
+              'instance_actions', 'instance_actions_events']
+
+# NOTE(boris-42): We can't do migration for the dns_domains table because it
+#                 doesn't have an `id` column.
+
+
+class InsertFromSelect(UpdateBase):
+    def __init__(self, table, select):
+        self.table = table
+        self.select = select
+
+
+@compiles(InsertFromSelect)
+def visit_insert_from_select(element, compiler, **kw):
+    return "INSERT INTO %s %s" % (
+        compiler.process(element.table, asfrom=True),
+        compiler.process(element.select))
+
+
+def get_default_deleted_value(table):
+    if isinstance(table.c.id.type, Integer):
+        return 0
+    # NOTE(boris-42): There is only one other type that is used as id (String)
+    return ""
+
+
+def upgrade_enterprise_dbs(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    for table_name in all_tables:
+        table = Table(table_name, meta, autoload=True)
+
+        new_deleted = Column('new_deleted', table.c.id.type,
+                             default=get_default_deleted_value(table))
+        new_deleted.create(table, populate_default=True)
+
+        table.update().\
+            where(table.c.deleted == True).\
+            values(new_deleted=table.c.id).\
+            execute()
+        table.c.deleted.drop()
+        table.c.new_deleted.alter(name="deleted")
+
+
+def upgrade(migrate_engine):
+    if migrate_engine.name != "sqlite":
+        return upgrade_enterprise_dbs(migrate_engine)
+
+    # NOTE(boris-42): sqlalchemy-migrate can't drop a column with check
+    #                 constraints in a sqlite DB, and our `deleted` column has
+    #                 2 check constraints. So there is only one way to remove
+    #                 these constraints:
+    #                 1) Create new table with the same columns, constraints
+    #                    and indexes (except the deleted column).
+    #                 2) Copy all data from old to new table.
+    #                 3) Drop old table.
+    #                 4) Rename new table to old table name.
+    insp = reflection.Inspector.from_engine(migrate_engine)
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    for table_name in all_tables:
+        table = Table(table_name, meta, autoload=True)
+        default_deleted_value = get_default_deleted_value(table)
+
+        columns = []
+        for column in table.columns:
+            column_copy = None
+            if column.name != "deleted":
+                # NOTE(boris-42): BigInteger is not supported by sqlite, so
+                #                 after the copy it will have NullType; the
+                #                 other types that are used in Nova are
+                #                 supported by sqlite.
+                if isinstance(column.type, NullType):
+                    column_copy = Column(column.name, BigInteger(), default=0)
+                else:
+                    column_copy = column.copy()
+            else:
+                column_copy = Column('deleted', table.c.id.type,
+                                     default=default_deleted_value)
+            columns.append(column_copy)
+
+        def is_deleted_column_constraint(constraint):
+            # NOTE(boris-42): There is no other way to check whether a
+            #                 CheckConstraint is associated with the deleted
+            #                 column.
+            if not isinstance(constraint, CheckConstraint):
+                return False
+            sqltext = str(constraint.sqltext)
+            return (sqltext.endswith("deleted in (0, 1)") or
+                    sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))
+
+        constraints = []
+        for constraint in table.constraints:
+            if not is_deleted_column_constraint(constraint):
+                constraints.append(constraint.copy())
+
+        new_table = Table(table_name + "__tmp__", meta,
+                          *(columns + constraints))
+        new_table.create()
+
+        indexes = []
+        for index in insp.get_indexes(table_name):
+            column_names = [new_table.c[c] for c in index['column_names']]
+            indexes.append(Index(index["name"],
+                                 *column_names,
+                                 unique=index["unique"]))
+
+        ins = InsertFromSelect(new_table, table.select())
+        migrate_engine.execute(ins)
+
+        table.drop()
+        [index.create(migrate_engine) for index in indexes]
+
+        new_table.rename(table_name)
+        new_table.update().\
+            where(new_table.c.deleted == True).\
+            values(deleted=new_table.c.id).\
+            execute()
+
+        # NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
+        new_table.update().\
+            where(new_table.c.deleted == False).\
+            values(deleted=default_deleted_value).\
+            execute()
+
+
+def downgrade_enterprise_dbs(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    for table_name in all_tables:
+        table = Table(table_name, meta, autoload=True)
+
+        old_deleted = Column('old_deleted', Boolean, default=False)
+        old_deleted.create(table, populate_default=False)
+
+        table.update().\
+            where(table.c.deleted == table.c.id).\
+            values(old_deleted=True).\
+            execute()
+
+        table.c.deleted.drop()
+        table.c.old_deleted.alter(name="deleted")
+
+
+def downgrade(migrate_engine):
+    if migrate_engine.name != "sqlite":
+        return downgrade_enterprise_dbs(migrate_engine)
+
+    insp = reflection.Inspector.from_engine(migrate_engine)
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    for table_name in all_tables:
+        table = Table(table_name, meta, autoload=True)
+
+        columns = []
+        for column in table.columns:
+            column_copy = None
+            if column.name != "deleted":
+                if isinstance(column.type, NullType):
+                    column_copy = Column(column.name, BigInteger(), default=0)
+                else:
+                    column_copy = column.copy()
+            else:
+                column_copy = Column('deleted', Boolean, default=0)
+            columns.append(column_copy)
+
+        constraints = [constraint.copy() for constraint in table.constraints]
+
+        new_table = Table(table_name + "__tmp__", meta,
+                          *(columns + constraints))
+        new_table.create()
+
+        indexes = []
+        for index in insp.get_indexes(table_name):
+            column_names = [new_table.c[c] for c in index['column_names']]
+            indexes.append(Index(index["name"],
+                                 *column_names,
+                                 unique=index["unique"]))
+
+        c_select = []
+        for c in table.c:
+            if c.name != "deleted":
+                c_select.append(c)
+            else:
+                c_select.append(table.c.deleted == table.c.id)
+
+        ins = InsertFromSelect(new_table, select(c_select))
+        migrate_engine.execute(ins)
+
+        table.drop()
+        [index.create(migrate_engine) for index in indexes]
+
+        new_table.rename(table_name)
+        new_table.update().\
+            where(new_table.c.deleted == new_table.c.id).\
+            values(deleted=True).\
+            execute()

View File

@@ -42,7 +42,7 @@ class NovaBase(object):
     created_at = Column(DateTime, default=timeutils.utcnow)
     updated_at = Column(DateTime, onupdate=timeutils.utcnow)
     deleted_at = Column(DateTime)
-    deleted = Column(Boolean, default=False)
+    deleted = Column(Integer, default=0)
     metadata = None
 
     def save(self, session=None):
@@ -63,7 +63,7 @@ class NovaBase(object):
 
     def soft_delete(self, session=None):
         """Mark this object as deleted."""
-        self.deleted = True
+        self.deleted = self.id
         self.deleted_at = timeutils.utcnow()
         self.save(session=session)
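
After this change the soft-delete contract is: a live row holds the column default (0 for integer ids, "" for string ids), and a deleted row holds its own id. A toy, ORM-free sketch of that contract (the Row class is illustrative, not Nova's model):

    import datetime

    class Row(object):
        def __init__(self, id):
            self.id = id
            self.deleted = 0          # column default for Integer-id tables
            self.deleted_at = None

        def soft_delete(self):
            self.deleted = self.id    # unique per row, unlike True
            self.deleted_at = datetime.datetime.utcnow()

    r = Row(7)
    r.soft_delete()
    print(r.deleted)  # 7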
@@ -129,7 +129,7 @@ class ComputeNode(BASE, NovaBase):
                            foreign_keys=service_id,
                            primaryjoin='and_('
                                 'ComputeNode.service_id == Service.id,'
-                                'ComputeNode.deleted == False)')
+                                'ComputeNode.deleted == 0)')
 
     vcpus = Column(Integer)
     memory_mb = Column(Integer)
@@ -173,7 +173,7 @@ class ComputeNodeStat(BASE, NovaBase):
     compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'))
 
     primary_join = ('and_(ComputeNodeStat.compute_node_id == '
-                    'ComputeNode.id, ComputeNodeStat.deleted == False)')
+                    'ComputeNode.id, ComputeNodeStat.deleted == 0)')
     stats = relationship("ComputeNode", backref="stats",
                          primaryjoin=primary_join)
@@ -358,6 +358,7 @@ class Volume(BASE, NovaBase):
     """Represents a block storage device that can be attached to a VM."""
     __tablename__ = 'volumes'
     id = Column(String(36), primary_key=True)
+    deleted = Column(String(36), default="")
 
     @property
     def name(self):
@@ -465,13 +466,14 @@ class Reservation(BASE, NovaBase):
         "QuotaUsage",
         foreign_keys=usage_id,
         primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
-                    'QuotaUsage.deleted == False)')
+                    'QuotaUsage.deleted == 0)')
 
 
 class Snapshot(BASE, NovaBase):
     """Represents a block storage device that can be attached to a VM."""
     __tablename__ = 'snapshots'
     id = Column(String(36), primary_key=True)
+    deleted = Column(String(36), default="")
 
     @property
     def name(self):
@@ -507,7 +509,7 @@ class BlockDeviceMapping(BASE, NovaBase):
                                          'instance_uuid=='
                                          'Instance.uuid,'
                                          'BlockDeviceMapping.deleted=='
-                                         'False)')
+                                         '0)')
     device_name = Column(String(255), nullable=False)
 
     # default=False for compatibility of the existing code.
@@ -542,7 +544,7 @@ class IscsiTarget(BASE, NovaBase):
                           backref=backref('iscsi_target', uselist=False),
                           foreign_keys=volume_id,
                           primaryjoin='and_(IscsiTarget.volume_id==Volume.id,'
-                                      'IscsiTarget.deleted==False)')
+                                      'IscsiTarget.deleted==0)')
 
 
 class SecurityGroupInstanceAssociation(BASE, NovaBase):
@@ -567,14 +569,14 @@ class SecurityGroup(BASE, NovaBase):
                              primaryjoin='and_('
         'SecurityGroup.id == '
         'SecurityGroupInstanceAssociation.security_group_id,'
-        'SecurityGroupInstanceAssociation.deleted == False,'
-        'SecurityGroup.deleted == False)',
+        'SecurityGroupInstanceAssociation.deleted == 0,'
+        'SecurityGroup.deleted == 0)',
                              secondaryjoin='and_('
         'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,'
         # (anthony) the condition below shouldn't be necessary now that the
         # association is being marked as deleted.  However, removing this
         # may cause existing deployments to choke, so I'm leaving it
-        'Instance.deleted == False)',
+        'Instance.deleted == 0)',
                              backref='security_groups')
@@ -588,7 +590,7 @@ class SecurityGroupIngressRule(BASE, NovaBase):
                                 foreign_keys=parent_group_id,
                                 primaryjoin='and_('
         'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,'
-        'SecurityGroupIngressRule.deleted == False)')
+        'SecurityGroupIngressRule.deleted == 0)')
 
     protocol = Column(String(5))  # "tcp", "udp", or "icmp"
     from_port = Column(Integer)
@@ -602,7 +604,7 @@ class SecurityGroupIngressRule(BASE, NovaBase):
                               foreign_keys=group_id,
                               primaryjoin='and_('
         'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
-        'SecurityGroupIngressRule.deleted == False)')
+        'SecurityGroupIngressRule.deleted == 0)')
 
 
 class ProviderFirewallRule(BASE, NovaBase):
@@ -651,7 +653,7 @@ class Migration(BASE, NovaBase):
     instance = relationship("Instance", foreign_keys=instance_uuid,
                             primaryjoin='and_(Migration.instance_uuid == '
                                         'Instance.uuid, Instance.deleted == '
-                                        'False)')
+                                        '0)')
 
 
 class Network(BASE, NovaBase):
@@ -735,6 +737,7 @@ class FloatingIp(BASE, NovaBase):
 class DNSDomain(BASE, NovaBase):
     """Represents a DNS domain with availability zone or project info."""
     __tablename__ = 'dns_domains'
+    deleted = Column(Boolean, default=False)
     domain = Column(String(512), primary_key=True)
     scope = Column(String(255))
     availability_zone = Column(String(255))
@@ -779,7 +782,7 @@ class InstanceMetadata(BASE, NovaBase):
                            primaryjoin='and_('
                                        'InstanceMetadata.instance_uuid == '
                                        'Instance.uuid,'
-                                       'InstanceMetadata.deleted == False)')
+                                       'InstanceMetadata.deleted == 0)')
 
 
 class InstanceSystemMetadata(BASE, NovaBase):
@@ -793,7 +796,7 @@ class InstanceSystemMetadata(BASE, NovaBase):
                            nullable=False)
 
     primary_join = ('and_(InstanceSystemMetadata.instance_uuid == '
-                    'Instance.uuid, InstanceSystemMetadata.deleted == False)')
+                    'Instance.uuid, InstanceSystemMetadata.deleted == 0)')
     instance = relationship(Instance, backref="system_metadata",
                             foreign_keys=instance_uuid,
                             primaryjoin=primary_join)
@@ -811,7 +814,7 @@ class InstanceTypeProjects(BASE, NovaBase):
                                   foreign_keys=instance_type_id,
                                   primaryjoin='and_('
         'InstanceTypeProjects.instance_type_id == InstanceTypes.id,'
-        'InstanceTypeProjects.deleted == False)')
+        'InstanceTypeProjects.deleted == 0)')
 
 
 class InstanceTypeExtraSpecs(BASE, NovaBase):
@@ -826,7 +829,7 @@ class InstanceTypeExtraSpecs(BASE, NovaBase):
                                   foreign_keys=instance_type_id,
                                   primaryjoin='and_('
         'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
-        'InstanceTypeExtraSpecs.deleted == False)')
+        'InstanceTypeExtraSpecs.deleted == 0)')
 
 
 class Cell(BASE, NovaBase):
@@ -880,24 +883,24 @@ class Aggregate(BASE, NovaBase):
                          secondary="aggregate_hosts",
                          primaryjoin='and_('
         'Aggregate.id == AggregateHost.aggregate_id,'
-        'AggregateHost.deleted == False,'
-        'Aggregate.deleted == False)',
+        'AggregateHost.deleted == 0,'
+        'Aggregate.deleted == 0)',
                          secondaryjoin='and_('
         'AggregateHost.aggregate_id == Aggregate.id, '
-        'AggregateHost.deleted == False,'
-        'Aggregate.deleted == False)',
+        'AggregateHost.deleted == 0,'
+        'Aggregate.deleted == 0)',
                          backref='aggregates')
     _metadata = relationship(AggregateMetadata,
                              secondary="aggregate_metadata",
                              primaryjoin='and_('
         'Aggregate.id == AggregateMetadata.aggregate_id,'
-        'AggregateMetadata.deleted == False,'
-        'Aggregate.deleted == False)',
+        'AggregateMetadata.deleted == 0,'
+        'Aggregate.deleted == 0)',
                              secondaryjoin='and_('
         'AggregateMetadata.aggregate_id == Aggregate.id, '
-        'AggregateMetadata.deleted == False,'
-        'Aggregate.deleted == False)',
+        'AggregateMetadata.deleted == 0,'
+        'Aggregate.deleted == 0)',
                              backref='aggregates')
 
     def _extra_keys(self):

View File

@@ -536,7 +536,7 @@ def create_engine(sql_connection):
 class Query(sqlalchemy.orm.query.Query):
     """Subclass of sqlalchemy.query with soft_delete() method."""
     def soft_delete(self, synchronize_session='evaluate'):
-        return self.update({'deleted': True,
+        return self.update({'deleted': literal_column('id'),
                             'updated_at': literal_column('updated_at'),
                             'deleted_at': timeutils.utcnow()},
                            synchronize_session=synchronize_session)
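
literal_column('id') makes the bulk version of the same contract work in a single statement: the UPDATE copies each row's own id into its deleted column server-side, with no per-row round trips. The equivalent plain SQL, runnable with stdlib sqlite3 (toy schema):

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE t '
                 '(id INTEGER PRIMARY KEY, deleted INTEGER DEFAULT 0)')
    conn.executemany('INSERT INTO t (id) VALUES (?)', [(1,), (2,), (3,)])
    # One bulk soft delete: every matched row gets its own id.
    conn.execute('UPDATE t SET deleted = id WHERE deleted = 0')
    print(conn.execute('SELECT id, deleted FROM t').fetchall())
    # [(1, 1), (2, 2), (3, 3)]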

View File

@@ -484,3 +484,77 @@ class TestMigrations(test.TestCase):
 
         migration_api.downgrade(engine, TestMigrations.REPOSITORY, 146)
         _146_check()
+
+    def test_migration_152(self):
+        host1 = 'compute-host1'
+        host2 = 'compute-host2'
+
+        def _151_check(services, volumes):
+            service = services.select(services.c.id == 1).execute().first()
+            self.assertEqual(False, service.deleted)
+            service = services.select(services.c.id == 2).execute().first()
+            self.assertEqual(True, service.deleted)
+
+            volume = volumes.select(volumes.c.id == "first").execute().first()
+            self.assertEqual(False, volume.deleted)
+            volume = volumes.select(volumes.c.id == "second").execute().first()
+            self.assertEqual(True, volume.deleted)
+
+        for key, engine in self.engines.items():
+            migration_api.version_control(engine, TestMigrations.REPOSITORY,
+                                          migration.INIT_VERSION)
+            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 151)
+            metadata = sqlalchemy.schema.MetaData()
+            metadata.bind = engine
+
+            # NOTE(boris-42): It is enough to test one table with type of `id`
+            #                 column Integer and one with type String.
+            services = sqlalchemy.Table('services', metadata, autoload=True)
+            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
+
+            engine.execute(
+                services.insert(),
+                [
+                    {'id': 1, 'host': host1, 'binary': 'nova-compute',
+                     'report_count': 0, 'topic': 'compute', 'deleted': False},
+                    {'id': 2, 'host': host1, 'binary': 'nova-compute',
+                     'report_count': 0, 'topic': 'compute', 'deleted': True}
+                ]
+            )
+            engine.execute(
+                volumes.insert(),
+                [
+                    {'id': 'first', 'host': host1, 'deleted': False},
+                    {'id': 'second', 'host': host2, 'deleted': True}
+                ]
+            )
+            _151_check(services, volumes)
+
+            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 152)
+            # NOTE(boris-42): One more time get from DB info about tables.
+            metadata2 = sqlalchemy.schema.MetaData()
+            metadata2.bind = engine
+            services = sqlalchemy.Table('services', metadata2, autoload=True)
+
+            service = services.select(services.c.id == 1).execute().first()
+            self.assertEqual(0, service.deleted)
+            service = services.select(services.c.id == 2).execute().first()
+            self.assertEqual(service.id, service.deleted)
+
+            volumes = sqlalchemy.Table('volumes', metadata2, autoload=True)
+            volume = volumes.select(volumes.c.id == "first").execute().first()
+            self.assertEqual("", volume.deleted)
+            volume = volumes.select(volumes.c.id == "second").execute().first()
+            self.assertEqual(volume.id, volume.deleted)
+
+            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 151)
+            # NOTE(boris-42): One more time get from DB info about tables.
+            metadata = sqlalchemy.schema.MetaData()
+            metadata.bind = engine
+            services = sqlalchemy.Table('services', metadata, autoload=True)
+            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
+            _151_check(services, volumes)
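
The volumes assertions above exercise the string-id variant: those tables redefine deleted as String(36) with default "", and soft delete writes the string id. A stdlib sqlite3 sketch of the same state transition (illustrative schema, not Nova's):

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute("CREATE TABLE volumes "
                 "(id TEXT PRIMARY KEY, deleted TEXT DEFAULT '')")
    conn.execute("INSERT INTO volumes (id) VALUES ('first')")
    # Soft delete: the string id replaces the "" default.
    conn.execute("UPDATE volumes SET deleted = id WHERE id = 'first'")
    print(conn.execute('SELECT id, deleted FROM volumes').fetchall())
    # [('first', 'first')]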