remove downgrade support from our database migrations

The TC approved a cross-project spec to stop putting db downgrades into
project migration repos:
https://github.com/openstack/openstack-specs/blob/master/specs/no-downward-sql-migration.rst
This was done after quite a bit of feedback from the operator
community, and the direction had broad support.

This implements that spec for Nova, adjusts the tests to pass
accordingly, and changes the test that required downgrade functions
into one that prohibits them.

Implements bp nova-no-downward-sql-migration

Change-Id: I9b164b748998530d62bbc6baf356a9c3f61584cc
Author: Sean Dague
Date:   2015-04-17 15:07:13 -04:00
Parent: 1248353ced
Commit: e2e846f74b

76 changed files with 10 additions and 897 deletions
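
Note: after this change, a migration module defines only an upgrade() hook, so
schema changes are one-way by design. A minimal sketch of the resulting module
shape, mirroring the hasattr/create_column patterns visible in the hunks below
(the table and column names here are hypothetical, not taken from this commit):

    # hypothetical upgrade-only migration after this change
    from sqlalchemy import Column, MetaData, Table, Text


    def upgrade(migrate_engine):
        meta = MetaData(bind=migrate_engine)
        example = Table('example_table', meta, autoload=True)
        # guard against duplicate runs, as several migrations below do
        if not hasattr(example.c, 'example_notes'):
            example.create_column(Column('example_notes', Text, nullable=True))
        # no downgrade() is defined; rolling back now means restoring from backup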

@@ -1551,7 +1551,3 @@ def upgrade(migrate_engine):
     _populate_instance_types(instance_types)
 
     _create_dump_tables(migrate_engine)
-
-
-def downgrade(migrate_engine):
-    raise NotImplementedError('Downgrade from Havana is unsupported.')

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -28,9 +28,3 @@ def upgrade(migrate_engine):
     col_resource.alter(type=String(255))
     table.update().where(table.c.resource == 'injected_file_content_byt')\
         .values(resource='injected_file_content_bytes').execute()
-
-
-def downgrade(migrate_engine):
-    # This migration fixes the resource of project_user_quotas table.
-    # No need to go back and reverse this change.
-    pass

@@ -30,15 +30,3 @@ def upgrade(migrate_engine):
     shadow_metrics = Column('metrics', Text, nullable=True)
     compute_nodes.create_column(metrics)
     shadow_compute_nodes.create_column(shadow_metrics)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    # Remove the new column
-    compute_nodes = Table('compute_nodes', meta, autoload=True)
-    shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
-
-    compute_nodes.drop_column('metrics')
-    shadow_compute_nodes.drop_column('metrics')

@@ -32,15 +32,3 @@ def upgrade(migrate_engine):
     shadow_extra_resources = Column('extra_resources', Text, nullable=True)
     compute_nodes.create_column(extra_resources)
     shadow_compute_nodes.create_column(shadow_extra_resources)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    # Remove the new column
-    compute_nodes = Table('compute_nodes', meta, autoload=True)
-    shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
-
-    compute_nodes.drop_column('extra_resources')
-    shadow_compute_nodes.drop_column('extra_resources')

@@ -28,13 +28,3 @@ def upgrade(migrate_engine):
             api._SHADOW_TABLE_PREFIX + 'instance_actions_events')
     shadow_actions_events.create_column(host.copy())
     shadow_actions_events.create_column(details.copy())
-
-
-def downgrade(migrate_engine):
-    actions_events = utils.get_table(migrate_engine, 'instance_actions_events')
-    actions_events.drop_column('host')
-    actions_events.drop_column('details')
-    shadow_actions_events = utils.get_table(migrate_engine,
-            api._SHADOW_TABLE_PREFIX + 'instance_actions_events')
-    shadow_actions_events.drop_column('host')
-    shadow_actions_events.drop_column('details')

@@ -32,13 +32,3 @@ def upgrade(migrate_engine):
                            values(ephemeral_key_uuid=None))
     migrate_engine.execute(shadow_instances.update().
                            values(ephemeral_key_uuid=None))
-
-
-def downgrade(migrate_engine):
-    """Function removes ephemeral storage encryption key uuid field."""
-    meta = MetaData(bind=migrate_engine)
-    instances = Table('instances', meta, autoload=True)
-    shadow_instances = Table('shadow_instances', meta, autoload=True)
-
-    instances.c.ephemeral_key_uuid.drop()
-    shadow_instances.c.ephemeral_key_uuid.drop()

@@ -26,7 +26,3 @@ def upgrade(migrate_engine):
     for table_name in table_names:
         table = Table('dump_' + table_name, meta)
         table.drop(checkfirst=True)
-
-
-def downgrade(migrate_engine):
-    pass

@@ -13,15 +13,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from oslo_utils import timeutils
 from sqlalchemy import Column
-from sqlalchemy import DateTime
-from sqlalchemy import ForeignKey
-from sqlalchemy import Index
-from sqlalchemy import Integer
 from sqlalchemy import MetaData
-from sqlalchemy import String
 from sqlalchemy import Table
 from sqlalchemy import Text
@@ -44,51 +37,3 @@ def upgrade(engine):
         table = Table(table_name, meta, autoload=True)
         stats = Column('stats', Text, default='{}')
         table.create_column(stats)
-
-
-def downgrade(engine):
-    meta = MetaData()
-    meta.bind = engine
-
-    table_names = ('compute_nodes', 'shadow_compute_nodes')
-    for table_name in table_names:
-        table = Table(table_name, meta, autoload=True)
-        table.drop_column('stats')
-
-    if engine.name == 'mysql':
-        fk_name = 'fk_compute_node_stats_compute_node_id'
-    else:
-        fk_name = 'compute_node_stats_compute_node_id_fkey'
-    table = Table('compute_node_stats', meta,
-        Column('created_at', DateTime, default=timeutils.utcnow),
-        Column('updated_at', DateTime, onupdate=timeutils.utcnow),
-        Column('deleted_at', DateTime),
-        Column('deleted', Integer, default=0),
-        Column('id', Integer, nullable=False),
-        Column('key', String(255), nullable=False),
-        Column('value', String(255), nullable=True),
-        Column('compute_node_id', Integer,
-               ForeignKey('compute_nodes.id', name=fk_name),
-               index=True),
-        Index('compute_node_stats_node_id_and_deleted_idx',
-              'compute_node_id', 'deleted'),
-        mysql_engine='InnoDB',
-        mysql_charset='utf8'
-    )
-    table.create()
-
-    # shadow version has no fkey or index
-    table = Table('shadow_compute_node_stats', meta,
-        Column('created_at', DateTime, default=timeutils.utcnow),
-        Column('updated_at', DateTime, onupdate=timeutils.utcnow),
-        Column('deleted_at', DateTime),
-        Column('deleted', Integer, default=0),
-        Column('id', Integer, primary_key=True, nullable=False),
-        Column('key', String(255), nullable=False),
-        Column('value', String(255), nullable=True),
-        Column('compute_node_id', Integer),
-        mysql_engine='InnoDB',
-        mysql_charset='utf8'
-    )
-    table.create()

@@ -43,17 +43,3 @@ def upgrade(migrate_engine):
                   reservations.c.deleted, reservations.c.expire)
 
     index.create(migrate_engine)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    reservations = Table('reservations', meta, autoload=True)
-
-    index = _get_deleted_expire_index(reservations)
-    if index:
-        index.drop(migrate_engine)
-    else:
-        LOG.info(_LI('Skipped removing reservations_deleted_expire_idx '
-                     'because index does not exist.'))

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -22,10 +22,3 @@ def upgrade(migrate_engine):
     # Match the maximum length of user_id in Keystone here instead of
     # assuming explicitly a single UUID length.
     col_resource.alter(type=String(64))
-
-
-def downgrade(migrate_engine):
-    meta = MetaData(bind=migrate_engine)
-    table = Table('volume_usage_cache', meta, autoload=True)
-    col_resource = getattr(table.c, 'user_id')
-    col_resource.alter(type=String(36))

@@ -51,40 +51,3 @@ def upgrade(migrate_engine):
         shadow_networks.create_column(enable_dhcp.copy())
     if not hasattr(shadow_networks.c, 'share_address'):
         shadow_networks.create_column(share_address.copy())
-
-
-# NOTE(vish): sqlite won't drop bool columns because it leaves a
-# constraint behind so work around it.
-def drop_boolean(column):
-    for constraint in column.table.constraints:
-        if column.name in unicode(getattr(constraint, 'sqltext', '')):
-            column.table.constraints.remove(constraint)
-            break
-    column.drop()
-
-
-def downgrade(migrate_engine):
-    """Function removes network mtu, dhcp_server, and share_dhcp fields."""
-    meta = MetaData(bind=migrate_engine)
-    networks = Table('networks', meta, autoload=True)
-    shadow_networks = Table('shadow_networks', meta, autoload=True)
-
-    # NOTE(vish): ignore duplicate runs of upgrade so this can
-    #             be backported
-    if hasattr(networks.c, 'mtu'):
-        networks.c.mtu.drop()
-    if hasattr(networks.c, 'dhcp_server'):
-        networks.c.dhcp_server.drop()
-    if hasattr(networks.c, 'enable_dhcp'):
-        drop_boolean(networks.c.enable_dhcp)
-    if hasattr(networks.c, 'share_address'):
-        drop_boolean(networks.c.share_address)
-
-    if hasattr(shadow_networks.c, 'mtu'):
-        shadow_networks.c.mtu.drop()
-    if hasattr(shadow_networks.c, 'dhcp_server'):
-        shadow_networks.c.dhcp_server.drop()
-    if hasattr(shadow_networks.c, 'enable_dhcp'):
-        drop_boolean(shadow_networks.c.enable_dhcp)
-    if hasattr(shadow_networks.c, 'share_address'):
-        drop_boolean(shadow_networks.c.share_address)

@@ -28,14 +28,3 @@ def upgrade(migrate_engine):
     fkey = ForeignKeyConstraint(columns=[pci_devices.c.compute_node_id],
                                 refcolumns=[compute_nodes.c.id])
     fkey.create()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData(bind=migrate_engine)
-    pci_devices = Table('pci_devices', meta, autoload=True)
-    compute_nodes = Table('compute_nodes', meta, autoload=True)
-
-    fkey = ForeignKeyConstraint(columns=[pci_devices.c.compute_node_id],
-                                refcolumns=[compute_nodes.c.id])
-    fkey.drop()

@@ -28,16 +28,3 @@ def upgrade(migrate_engine):
     pci_devices.c.product_id.alter(nullable=False)
     pci_devices.c.vendor_id.alter(nullable=False)
     pci_devices.c.dev_type.alter(nullable=False)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData(bind=migrate_engine)
-
-    quota_usages = Table('quota_usages', meta, autoload=True)
-    quota_usages.c.resource.alter(nullable=True)
-
-    pci_devices = Table('pci_devices', meta, autoload=True)
-    pci_devices.c.deleted.alter(nullable=False)
-    pci_devices.c.product_id.alter(nullable=True)
-    pci_devices.c.vendor_id.alter(nullable=True)
-    pci_devices.c.dev_type.alter(nullable=True)

@@ -43,17 +43,3 @@ def upgrade(migrate_engine):
                   reservations.c.deleted, reservations.c.expire)
 
     index.create(migrate_engine)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    reservations = Table('reservations', meta, autoload=True)
-
-    index = _get_deleted_expire_index(reservations)
-    if index:
-        index.drop(migrate_engine)
-    else:
-        LOG.info(_LI('Skipped removing reservations_deleted_expire_idx '
-                     'because index does not exist.'))

@@ -29,8 +29,3 @@ def upgrade(migrate_engine):
     for index in bdm.indexes:
         if index.name == INDEX_NAME:
             index.drop()
-
-
-def downgrade(migrate_engine):
-    # Unnecessary to re-add duplicate index when downgrading
-    pass

@@ -14,8 +14,7 @@
 # under the License.
 
-from sqlalchemy import MetaData, Table, Column, DateTime, Integer, String, \
-    ForeignKey
+from sqlalchemy import MetaData, Table
 
 
 def upgrade(migrate_engine):
@@ -30,42 +29,3 @@ def upgrade(migrate_engine):
     shadow_group_metadata = Table('shadow_instance_group_metadata', meta,
                                   autoload=True)
     shadow_group_metadata.drop()
-
-
-def downgrade(migrate_engine):
-    """Revert removal of the instance_group_metadata table."""
-    meta = MetaData(bind=migrate_engine)
-    Table('instance_groups', meta, autoload=True)
-    Table('shadow_instance_groups', meta, autoload=True)
-
-    if not migrate_engine.has_table('instance_group_metadata'):
-        group_metadata = Table('instance_group_metadata', meta,
-            Column('created_at', DateTime),
-            Column('updated_at', DateTime),
-            Column('deleted_at', DateTime),
-            Column('deleted', Integer),
-            Column('id', Integer, primary_key=True, nullable=False),
-            Column('key', String(length=255)),
-            Column('value', String(length=255)),
-            Column('group_id', Integer, ForeignKey('instance_groups.id'),
-                   nullable=False),
-            mysql_engine='InnoDB',
-            mysql_charset='utf8',
-        )
-        group_metadata.create()
-    if not migrate_engine.has_table('shadow_instance_group_metadata'):
-        shadow_group_metadata = Table('shadow_instance_group_metadata', meta,
-            Column('created_at', DateTime),
-            Column('updated_at', DateTime),
-            Column('deleted_at', DateTime),
-            Column('deleted', Integer),
-            Column('id', Integer, primary_key=True, nullable=False),
-            Column('key', String(length=255)),
-            Column('value', String(length=255)),
-            Column('group_id', Integer,
-                   ForeignKey('shadow_instance_groups.id'),
-                   nullable=False),
-            mysql_engine='InnoDB',
-            mysql_charset='utf8',
-        )
-        shadow_group_metadata.create()

@@ -28,14 +28,3 @@ def upgrade(migrate_engine):
     shadow_numa_topology = Column('numa_topology', Text, nullable=True)
     compute_nodes.create_column(numa_topology)
     shadow_compute_nodes.create_column(shadow_numa_topology)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    compute_nodes = Table('compute_nodes', meta, autoload=True)
-    shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
-
-    compute_nodes.drop_column('numa_topology')
-    shadow_compute_nodes.drop_column('numa_topology')

@@ -58,14 +58,3 @@ def upgrade(migrate_engine):
         instance_fkey = ForeignKeyConstraint(
             columns=fkey_columns, refcolumns=fkey_refcolumns)
         instance_fkey.create()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    for prefix in ('', 'shadow_'):
-        table_name = prefix + 'instance_extra'
-        if migrate_engine.has_table(table_name):
-            instance_extra = Table(table_name, meta, autoload=True)
-            instance_extra.drop()

@@ -30,13 +30,3 @@ def upgrade(migrate_engine):
         new_column = Column(NEW_COLUMN_NAME, Text, nullable=True)
         if not hasattr(table.c, NEW_COLUMN_NAME):
             table.create_column(new_column)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    for prefix in ('', 'shadow_'):
-        table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
-        if hasattr(table.c, NEW_COLUMN_NAME):
-            getattr(table.c, NEW_COLUMN_NAME).drop()

@@ -33,17 +33,3 @@ def upgrade(engine):
 
     if not hasattr(shadow_pci_devices.c, 'request_id'):
         shadow_pci_devices.create_column(request_id.copy())
-
-
-def downgrade(engine):
-    """Function drops request_id field."""
-    meta = MetaData(bind=engine)
-    pci_devices = Table('pci_devices', meta, autoload=True)
-    shadow_pci_devices = Table('shadow_pci_devices', meta, autoload=True)
-
-    if hasattr(pci_devices.c, 'request_id'):
-        pci_devices.c.request_id.drop()
-
-    if hasattr(shadow_pci_devices.c, 'request_id'):
-        shadow_pci_devices.c.request_id.drop()

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -35,8 +35,3 @@ def upgrade(migrate_engine):
         for index in table.indexes:
             if index.name == index_name:
                 index.drop()
-
-
-def downgrade(migrate_engine):
-    # Unnecessary to re-add duplicated index when downgrading
-    pass

@@ -25,10 +25,3 @@ def upgrade(migrate_engine):
         mysql_engine='InnoDB',
         mysql_charset='utf8')
     tags.create()
-
-
-def downgrade(migrate_engine):
-    meta = sa.MetaData()
-    meta.bind = migrate_engine
-    table = sa.Table('tags', meta, autoload=True)
-    table.drop()

@@ -13,7 +13,6 @@
 # under the License.
 
 from migrate import UniqueConstraint
-from oslo_db.sqlalchemy import utils
 from oslo_log import log as logging
 from sqlalchemy import MetaData
 from sqlalchemy.sql import null
@@ -112,14 +111,3 @@ def upgrade(migrate_engine):
     # any ForeignKeys on the instances.uuid column due to some index rename
     # issues in older versions of MySQL. That is beyond the scope of this
     # migration.
-
-
-def downgrade(migrate_engine):
-    # drop the unique constraint on instances.uuid
-    UniqueConstraint('uuid',
-                     table=utils.get_table(migrate_engine, 'instances'),
-                     name=UC_NAME).drop()
-    # We can't bring the deleted records back but we can make uuid nullable.
-    for table_name in ('instances', 'shadow_instances'):
-        table = utils.get_table(migrate_engine, table_name)
-        table.columns.uuid.alter(nullable=True)

@@ -40,19 +40,3 @@ def upgrade(migrate_engine):
     ukey = UniqueConstraint('host', 'hypervisor_hostname', table=compute_nodes,
                             name="uniq_compute_nodes0host0hypervisor_hostname")
     ukey.create()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    # Remove the new column
-    compute_nodes = Table('compute_nodes', meta, autoload=True)
-    shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
-
-    ukey = UniqueConstraint('host', 'hypervisor_hostname', table=compute_nodes,
-                            name="uniq_compute_nodes0host0hypervisor_hostname")
-    ukey.drop()
-
-    compute_nodes.drop_column('host')
-    shadow_compute_nodes.drop_column('host')

@@ -30,16 +30,3 @@ def upgrade(migrate_engine):
         pci_devices.create_column(numa_node)
     if not hasattr(shadow_pci_devices.c, 'numa_node'):
         shadow_pci_devices.create_column(numa_node.copy())
-
-
-def downgrade(migrate_engine):
-    meta = MetaData(bind=migrate_engine)
-
-    # Remove the numa_node column
-    pci_devices = Table('pci_devices', meta, autoload=True)
-    shadow_pci_devices = Table('shadow_pci_devices', meta, autoload=True)
-
-    if hasattr(pci_devices.c, 'numa_node'):
-        pci_devices.drop_column('numa_node')
-    if hasattr(shadow_pci_devices.c, 'numa_node'):
-        shadow_pci_devices.drop_column('numa_node')

@@ -30,13 +30,3 @@ def upgrade(migrate_engine):
         new_column = Column(NEW_COLUMN_NAME, Text, nullable=True)
         if not hasattr(table.c, NEW_COLUMN_NAME):
             table.create_column(new_column)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    for prefix in ('', 'shadow_'):
-        table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
-        if hasattr(table.c, NEW_COLUMN_NAME):
-            getattr(table.c, NEW_COLUMN_NAME).drop()

@@ -67,22 +67,3 @@ def upgrade(migrate_engine):
         ensure_index_exists(migrate_engine, 'virtual_interfaces',
                             'virtual_interfaces_network_id_idx',
                             ['network_id'])
-
-
-def downgrade(migrate_engine):
-    """Remove indexes previously missing on SQLite and PostgreSQL."""
-    if migrate_engine.name in ('sqlite', 'postgresql'):
-        for table_name, index_name, column_names in INDEXES:
-            ensure_index_removed(migrate_engine, table_name, index_name)
-    elif migrate_engine.name == 'mysql':
-        # Rename some indexes with conflicting names back
-        ensure_index_removed(migrate_engine, 'dns_domains',
-                             'dns_domains_project_id_idx')
-        ensure_index_exists(migrate_engine, 'dns_domains', 'project_id',
-                            ['project_id'])
-
-        ensure_index_removed(migrate_engine, 'virtual_interfaces',
-                             'virtual_interfaces_network_id_idx')
-        ensure_index_exists(migrate_engine, 'virtual_interfaces', 'network_id',
-                            ['network_id'])

@@ -26,7 +26,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migrate_engine):
-    pass

@@ -106,10 +106,3 @@ def upgrade(migrate_engine):
 
         uc = UniqueConstraint(*column_names, table=table, name=name)
         uc.create()
-
-
-def downgrade(migrate_engine):
-    # sqlalchemy-migrate doesn't handle dropping foreign keys. Since this
-    # migration only applies to SQLite, which isn't a supported production
-    # deploy, the amount of work outweighs the need for a downgrade.
-    pass

@@ -44,24 +44,3 @@ def upgrade(migrate_engine):
     for index in instances.indexes:
         if [c.name for c in index.columns] == ['project_id']:
             index.drop()
-
-
-def downgrade(migrate_engine):
-    """Change instances (project_id, deleted) index to cover (project_id)."""
-    meta = MetaData(bind=migrate_engine)
-    instances = Table('instances', meta, autoload=True)
-
-    for index in instances.indexes:
-        if [c.name for c in index.columns] == ['project_id']:
-            LOG.info(_LI('Skipped adding instances_project_id_idx '
-                         'because an equivalent index already exists.'))
-            break
-    else:
-        index = Index('project_id', instances.c.project_id)
-        index.create()
-
-    for index in instances.indexes:
-        if [c.name for c in index.columns] == ['project_id', 'deleted']:
-            index.drop()

@@ -39,19 +39,3 @@ def upgrade(migrate_engine):
 
     key_pairs.create_column(keypair_type)
     shadow_key_pairs.create_column(keypair_type.copy())
-
-
-def downgrade(migrate_engine):
-    """Function removes key_pairs type field."""
-    meta = MetaData(bind=migrate_engine)
-    key_pairs = Table('key_pairs', meta, autoload=True)
-    shadow_key_pairs = Table('shadow_key_pairs', meta, autoload=True)
-    enum = Enum(metadata=meta, name='keypair_types')
-
-    if hasattr(key_pairs.c, 'type'):
-        key_pairs.c.type.drop()
-    if hasattr(shadow_key_pairs.c, 'type'):
-        shadow_key_pairs.c.type.drop()
-    enum.drop()

@@ -30,13 +30,3 @@ def upgrade(migrate_engine):
         new_column = Column(NEW_COLUMN_NAME, Text, nullable=True)
         if not hasattr(table.c, NEW_COLUMN_NAME):
             table.create_column(new_column)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    for prefix in ('', 'shadow_'):
-        table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
-        if hasattr(table.c, NEW_COLUMN_NAME):
-            getattr(table.c, NEW_COLUMN_NAME).drop()

@@ -43,12 +43,3 @@ def upgrade(migrate_engine):
     columns = [getattr(table.c, col_name) for col_name in INDEX_COLUMNS]
     index = Index(INDEX_NAME, *columns)
    index.create(migrate_engine)
-
-
-def downgrade(migrate_engine):
-    meta, table, index = _get_table_index(migrate_engine)
-    if not index:
-        LOG.info(_LI('Skipped removing %s because no such index exists'),
-                 INDEX_NAME)
-        return
-    index.drop(migrate_engine)

@@ -66,40 +66,3 @@ def upgrade(migrate_engine):
                 # Delete the nested index which was created by the FK
                 index.drop()
                 break
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    compute_nodes = Table('compute_nodes', meta, autoload=True)
-    shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
-    services = Table('services', meta, autoload=True)
-
-    _correct_sqlite_unique_constraints(migrate_engine, compute_nodes)
-
-    # Make the service_id field not nullable
-    # NOTE(sbauza): Beyond the point of this commit, service_id will not be
-    # updated, but previous commits still do. We can tho safely go back to
-    # a state where all the compute nodes are providing this field.
-    compute_nodes.c.service_id.alter(nullable=False)
-    shadow_compute_nodes.c.service_id.alter(nullable=False)
-
-    # Adding only FK if not existing yet
-    fkeys = {fk.parent.name: fk.column
-             for fk in compute_nodes.foreign_keys}
-    if 'service_id' in fkeys and fkeys['service_id'] == services.c.id:
-        return
-
-    # NOTE(sbauza): See 216_havana.py for the whole logic
-    if migrate_engine.name == 'postgresql':
-        # PostgreSQL names things like it wants (correct and compatible!)
-        fkey = ForeignKeyConstraint(columns=[compute_nodes.c.service_id],
-                                    refcolumns=[services.c.id])
-        fkey.create()
-    else:
-        # For MySQL we name our fkeys explicitly so they match Havana
-        fkey = ForeignKeyConstraint(columns=[compute_nodes.c.service_id],
-                                    refcolumns=[services.c.id],
-                                    name='fk_compute_nodes_service_id')
-        fkey.create()

@@ -33,19 +33,3 @@ def upgrade(migrate_engine):
         table=compute_nodes,
         name="uniq_compute_nodes0host0hypervisor_hostname0deleted")
     ukey.create()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    compute_nodes = Table('compute_nodes', meta, autoload=True)
-
-    ukey = UniqueConstraint(
-        'host', 'hypervisor_hostname', 'deleted',
-        table=compute_nodes,
-        name="uniq_compute_nodes0host0hypervisor_hostname0deleted")
-    ukey.drop()
-
-    ukey = UniqueConstraint('host', 'hypervisor_hostname', table=compute_nodes,
-                            name="uniq_compute_nodes0host0hypervisor_hostname")
-    ukey.create()

@@ -33,21 +33,3 @@ def upgrade(migrate_engine):
 
     UniqueConstraint('user_id', 'name', 'deleted', table=key_pairs,
                      name='uniq_key_pairs0user_id0name0deleted').create()
-
-
-def downgrade(migrate_engine):
-    """Function allows null value for keypairs name field."""
-    meta = MetaData(bind=migrate_engine)
-    key_pairs = Table('key_pairs', meta, autoload=True)
-
-    # Note: Since we are altering name field, this constraint on name needs to
-    # first be dropped before we can alter name. We then re-create the same
-    # constraint. It was first added in 216_havana.py so no need to remove
-    # constraint on downgrade.
-    UniqueConstraint('user_id', 'name', 'deleted', table=key_pairs,
-                     name='uniq_key_pairs0user_id0name0deleted').drop()
-    key_pairs.c.name.alter(nullable=True)
-    UniqueConstraint('user_id', 'name', 'deleted', table=key_pairs,
-                     name='uniq_key_pairs0user_id0name0deleted').create()

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass

@@ -69,9 +69,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
     TIMEOUT_SCALING_FACTOR = 2
 
-    snake_walk = True
-    downgrade = True
-
     @property
     def INIT_VERSION(self):
         return migration.db_initial_version()
@@ -194,7 +191,7 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         super(NovaMigrationsCheckers, self).migrate_up(version, with_data)
 
     def test_walk_versions(self):
-        self.walk_versions(self.snake_walk, self.downgrade)
+        self.walk_versions(snake_walk=False, downgrade=False)
 
     def _check_227(self, engine, data):
         table = oslodbutils.get_table(engine, 'project_user_quotas')
@@ -218,9 +215,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         self.assertIsInstance(compute_nodes.c.metrics.type,
                               sqlalchemy.types.Text)
 
-    def _post_downgrade_228(self, engine):
-        self.assertColumnNotExists(engine, 'compute_nodes', 'metrics')
-
     def _check_229(self, engine, data):
         self.assertColumnExists(engine, 'compute_nodes', 'extra_resources')
@@ -228,9 +222,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         self.assertIsInstance(compute_nodes.c.extra_resources.type,
                               sqlalchemy.types.Text)
 
-    def _post_downgrade_229(self, engine):
-        self.assertColumnNotExists(engine, 'compute_nodes', 'extra_resources')
-
     def _check_230(self, engine, data):
         for table_name in ['instance_actions_events',
                            'shadow_instance_actions_events']:
@@ -244,12 +235,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         self.assertIsInstance(action_events.c.details.type,
                               sqlalchemy.types.Text)
 
-    def _post_downgrade_230(self, engine):
-        for table_name in ['instance_actions_events',
-                           'shadow_instance_actions_events']:
-            self.assertColumnNotExists(engine, table_name, 'host')
-            self.assertColumnNotExists(engine, table_name, 'details')
-
     def _check_231(self, engine, data):
         self.assertColumnExists(engine, 'instances', 'ephemeral_key_uuid')
@@ -258,10 +243,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
                               sqlalchemy.types.String)
         self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
 
-    def _post_downgrade_231(self, engine):
-        self.assertColumnNotExists(engine, 'instances', 'ephemeral_key_uuid')
-        self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
-
     def _check_232(self, engine, data):
         table_names = ['compute_node_stats', 'compute_nodes',
                        'instance_actions', 'instance_actions_events',
@@ -279,12 +260,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         self.assertRaises(sqlalchemy.exc.NoSuchTableError,
                           oslodbutils.get_table, engine, 'compute_node_stats')
 
-    def _post_downgrade_233(self, engine):
-        self.assertColumnNotExists(engine, 'compute_nodes', 'stats')
-
-        # confirm compute_node_stats exists
-        oslodbutils.get_table(engine, 'compute_node_stats')
-
     def _check_234(self, engine, data):
         self.assertIndexMembers(engine, 'reservations',
                                 'reservations_deleted_expire_idx',
@@ -295,11 +270,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
             engine, 'volume_usage_cache')
         self.assertEqual(64, volume_usage_cache.c.user_id.type.length)
 
-    def _post_downgrade_244(self, engine):
-        volume_usage_cache = oslodbutils.get_table(
-            engine, 'volume_usage_cache')
-        self.assertEqual(36, volume_usage_cache.c.user_id.type.length)
-
     def _pre_upgrade_245(self, engine):
         # create a fake network
         networks = oslodbutils.get_table(engine, 'networks')
@@ -318,22 +288,11 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         # share address should default to false
         self.assertFalse(network.share_address)
 
-    def _post_downgrade_245(self, engine):
-        self.assertColumnNotExists(engine, 'networks', 'mtu')
-        self.assertColumnNotExists(engine, 'networks', 'dhcp_server')
-        self.assertColumnNotExists(engine, 'networks', 'enable_dhcp')
-        self.assertColumnNotExists(engine, 'networks', 'share_address')
-
     def _check_246(self, engine, data):
         pci_devices = oslodbutils.get_table(engine, 'pci_devices')
         self.assertEqual(1, len([fk for fk in pci_devices.foreign_keys
                                  if fk.parent.name == 'compute_node_id']))
 
-    def _post_downgrade_246(self, engine):
-        pci_devices = oslodbutils.get_table(engine, 'pci_devices')
-        self.assertEqual(0, len([fk for fk in pci_devices.foreign_keys
-                                 if fk.parent.name == 'compute_node_id']))
-
     def _check_247(self, engine, data):
         quota_usages = oslodbutils.get_table(engine, 'quota_usages')
         self.assertFalse(quota_usages.c.resource.nullable)
@@ -344,26 +303,11 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         self.assertFalse(pci_devices.c.vendor_id.nullable)
         self.assertFalse(pci_devices.c.dev_type.nullable)
 
-    def _post_downgrade_247(self, engine):
-        quota_usages = oslodbutils.get_table(engine, 'quota_usages')
-        self.assertTrue(quota_usages.c.resource.nullable)
-
-        pci_devices = oslodbutils.get_table(engine, 'pci_devices')
-        self.assertFalse(pci_devices.c.deleted.nullable)
-        self.assertTrue(pci_devices.c.product_id.nullable)
-        self.assertTrue(pci_devices.c.vendor_id.nullable)
-        self.assertTrue(pci_devices.c.dev_type.nullable)
-
     def _check_248(self, engine, data):
         self.assertIndexMembers(engine, 'reservations',
                                 'reservations_deleted_expire_idx',
                                 ['deleted', 'expire'])
 
-    def _post_downgrade_248(self, engine):
-        reservations = oslodbutils.get_table(engine, 'reservations')
-        index_names = [idx.name for idx in reservations.indexes]
-        self.assertNotIn('reservations_deleted_expire_idx', index_names)
-
     def _check_249(self, engine, data):
         # Assert that only one index exists that covers columns
         # instance_uuid and device_name
@@ -372,23 +316,10 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
                                  if [c.name for c in i.columns] ==
                                  ['instance_uuid', 'device_name']]))
 
-    def _post_downgrade_249(self, engine):
-        # The duplicate index is not created on downgrade, so this
-        # asserts that only one index exists that covers columns
-        # instance_uuid and device_name
-        bdm = oslodbutils.get_table(engine, 'block_device_mapping')
-        self.assertEqual(1, len([i for i in bdm.indexes
-                                 if [c.name for c in i.columns] ==
-                                 ['instance_uuid', 'device_name']]))
-
     def _check_250(self, engine, data):
         self.assertTableNotExists(engine, 'instance_group_metadata')
         self.assertTableNotExists(engine, 'shadow_instance_group_metadata')
 
-    def _post_downgrade_250(self, engine):
-        oslodbutils.get_table(engine, 'instance_group_metadata')
-        oslodbutils.get_table(engine, 'shadow_instance_group_metadata')
-
     def _check_251(self, engine, data):
         self.assertColumnExists(engine, 'compute_nodes', 'numa_topology')
         self.assertColumnExists(engine, 'shadow_compute_nodes',
@@ -402,11 +333,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         self.assertIsInstance(shadow_compute_nodes.c.numa_topology.type,
                               sqlalchemy.types.Text)
 
-    def _post_downgrade_251(self, engine):
-        self.assertColumnNotExists(engine, 'compute_nodes', 'numa_topology')
-        self.assertColumnNotExists(engine, 'shadow_compute_nodes',
-                                   'numa_topology')
-
     def _check_252(self, engine, data):
         oslodbutils.get_table(engine, 'instance_extra')
         oslodbutils.get_table(engine, 'shadow_instance_extra')
@@ -414,10 +340,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
                                 'instance_extra_idx',
                                 ['instance_uuid'])
 
-    def _post_downgrade_252(self, engine):
-        self.assertTableNotExists(engine, 'instance_extra')
-        self.assertTableNotExists(engine, 'shadow_instance_extra')
-
     def _check_253(self, engine, data):
         self.assertColumnExists(engine, 'instance_extra', 'pci_requests')
         self.assertColumnExists(
@@ -430,11 +352,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         self.assertIsInstance(shadow_instance_extra.c.pci_requests.type,
                               sqlalchemy.types.Text)
 
-    def _post_downgrade_253(self, engine):
-        self.assertColumnNotExists(engine, 'instance_extra', 'pci_requests')
-        self.assertColumnNotExists(engine, 'shadow_instance_extra',
-                                   'pci_requests')
-
     def _check_254(self, engine, data):
         self.assertColumnExists(engine, 'pci_devices', 'request_id')
         self.assertColumnExists(
@@ -448,11 +365,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         self.assertIsInstance(shadow_pci_devices.c.request_id.type,
                               sqlalchemy.types.String)
 
-    def _post_downgrade_254(self, engine):
-        self.assertColumnNotExists(engine, 'pci_devices', 'request_id')
-        self.assertColumnNotExists(
-            engine, 'shadow_pci_devices', 'request_id')
-
     def _check_265(self, engine, data):
         # Assert that only one index exists that covers columns
         # host and deleted
@@ -466,20 +378,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
                                  if [c.name for c in i.columns][:1] ==
                                  ['host']]))
 
-    def _post_downgrade_265(self, engine):
-        # The duplicated index is not created on downgrade, so this
-        # asserts that only one index exists that covers columns
-        # host and deleted
-        instances = oslodbutils.get_table(engine, 'instances')
-        self.assertEqual(1, len([i for i in instances.indexes
-                                 if [c.name for c in i.columns][:2] ==
-                                 ['host', 'deleted']]))
-        # and only one index covers host column
-        iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
-        self.assertEqual(1, len([i for i in iscsi_targets.indexes
-                                 if [c.name for c in i.columns][:1] ==
-                                 ['host']]))
-
     def _check_266(self, engine, data):
         self.assertColumnExists(engine, 'tags', 'resource_id')
         self.assertColumnExists(engine, 'tags', 'tag')
@@ -491,9 +389,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         self.assertIsInstance(table.c.tag.type,
                               sqlalchemy.types.String)
 
-    def _post_downgrade_266(self, engine):
-        self.assertTableNotExists(engine, 'tags')
-
     def _pre_upgrade_267(self, engine):
         # Create a fixed_ips row with a null instance_uuid (if not already
         # there) to make sure that's not deleted.
@@ -539,16 +434,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
                   ).execute().first()
         self.assertIsNone(volume.instance_uuid)
 
-    def _post_downgrade_267(self, engine):
-        # Make sure the UC is gone and the column is nullable again.
-        instances = oslodbutils.get_table(engine, 'instances')
-        self.assertTrue(instances.c.uuid.nullable)
-
-        inspector = reflection.Inspector.from_engine(engine)
-        constraints = inspector.get_unique_constraints('instances')
-        constraint_names = [constraint['name'] for constraint in constraints]
-        self.assertNotIn('uniq_instances0uuid', constraint_names)
-
     def test_migration_267(self):
         # This is separate from test_walk_versions so we can test the case
         # where there are non-null instance_uuid entries in the database which
@@ -603,10 +488,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         self.assertIsInstance(shadow_compute_nodes.c.host.type,
                               sqlalchemy.types.String)
 
-    def _post_downgrade_268(self, engine):
-        self.assertColumnNotExists(engine, 'compute_nodes', 'host')
-        self.assertColumnNotExists(engine, 'shadow_compute_nodes', 'host')
-
     def _check_269(self, engine, data):
         self.assertColumnExists(engine, 'pci_devices', 'numa_node')
@@ -621,10 +502,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
                               sqlalchemy.types.Integer)
         self.assertTrue(shadow_pci_devices.c.numa_node.nullable)
 
-    def _post_downgrade_269(self, engine):
-        self.assertColumnNotExists(engine, 'pci_devices', 'numa_node')
-        self.assertColumnNotExists(engine, 'shadow_pci_devices', 'numa_node')
-
     def _check_270(self, engine, data):
         self.assertColumnExists(engine, 'instance_extra', 'flavor')
         self.assertColumnExists(engine, 'shadow_instance_extra', 'flavor')
@@ -637,10 +514,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         self.assertIsInstance(shadow_instance_extra.c.flavor.type,
                               sqlalchemy.types.Text)
 
-    def _post_downgrade_270(self, engine):
-        self.assertColumnNotExists(engine, 'instance_extra', 'flavor')
-        self.assertColumnNotExists(engine, 'shadow_instance_extra', 'flavor')
-
     def _check_271(self, engine, data):
         self.assertIndexMembers(engine, 'block_device_mapping',
                                 'snapshot_id', ['snapshot_id'])
@@ -672,39 +545,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         self.assertIndexNotExists(engine, 'dns_domains', 'project_id')
         self.assertIndexNotExists(engine, 'virtual_interfaces', 'network_id')
 
-    def _post_downgrade_271(self, engine):
-        self.assertIndexNotExists(engine, 'dns_domains',
-                                  'dns_domains_project_id_idx')
-        self.assertIndexNotExists(engine, 'virtual_interfaces',
-                                  'virtual_interfaces_network_id_idx')
-
-        if engine.name == 'mysql':
-            self.assertIndexMembers(engine, 'dns_domains',
-                                    'project_id',
-                                    ['project_id'])
-            self.assertIndexMembers(engine, 'virtual_interfaces',
-                                    'network_id',
-                                    ['network_id'])
-            # Rest of indexes will still exist on MySQL
-            return
-
-        # Never existed on non-MySQL databases, so shouldn't exist now
-        self.assertIndexNotExists(engine, 'dns_domains', 'project_id')
-        self.assertIndexNotExists(engine, 'virtual_interfaces', 'network_id')
-
-        for table_name, index_name in [
-                ('block_device_mapping', 'snapshot_id'),
-                ('block_device_mapping', 'volume_id'),
-                ('dns_domains', 'dns_domains_project_id_idx'),
-                ('fixed_ips', 'network_id'),
-                ('fixed_ips', 'fixed_ips_instance_uuid_fkey'),
-                ('fixed_ips', 'fixed_ips_virtual_interface_id_fkey'),
-                ('floating_ips', 'fixed_ip_id'),
-                ('iscsi_targets', 'iscsi_targets_volume_id_fkey'),
-                ('virtual_interfaces', 'virtual_interfaces_network_id_idx'),
-                ('virtual_interfaces',
-                 'virtual_interfaces_instance_uuid_fkey')]:
-            self.assertIndexNotExists(engine, table_name, index_name)
-
     def _pre_upgrade_273(self, engine):
         if engine.name != 'sqlite':
             return
@@ -762,22 +602,12 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         self.assertEqual(fkeys[src_column].table.name, dst_table)
         self.assertEqual(fkeys[src_column].name, dst_column)
 
-    def _post_downgrade_273(self, engine):
-        # NOTE(johannes): No downgrade implemented, so nothing to check
-        pass
-
     def _check_274(self, engine, data):
         self.assertIndexMembers(engine, 'instances',
                                 'instances_project_id_deleted_idx',
                                 ['project_id', 'deleted'])
         self.assertIndexNotExists(engine, 'instances', 'project_id')
 
-    def _post_downgrade_274(self, engine):
-        self.assertIndexMembers(engine, 'instances',
-                                'project_id', ['project_id'])
-        self.assertIndexNotExists(engine, 'instances',
-                                  'instances_project_id_deleted_idx')
-
     def _pre_upgrade_275(self, engine):
         # Create a keypair record so we can test that the upgrade will set
         # 'ssh' as default value in the new column for the previous keypair
@@ -803,10 +633,6 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
             key_pairs.c.name == 'test-migr').execute().first()
         self.assertEqual('ssh', keypair.type)
 
-    def _post_downgrade_275(self, engine):
-        self.assertColumnNotExists(engine, 'key_pairs', 'type')
-        self.assertColumnNotExists(engine, 'shadow_key_pairs', 'type')
-
     def _check_276(self, engine, data):
         self.assertColumnExists(engine, 'instance_extra', 'vcpu_model')
         self.assertColumnExists(engine, 'shadow_instance_extra', 'vcpu_model')
@@ -819,42 +645,17 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         self.assertIsInstance(shadow_instance_extra.c.vcpu_model.type,
                               sqlalchemy.types.Text)
 
-    def _post_downgrade_276(self, engine):
-        self.assertColumnNotExists(engine, 'instance_extra', 'vcpu_model')
-        self.assertColumnNotExists(engine, 'shadow_instance_extra',
-                                   'vcpu_model')
-
     def _check_277(self, engine, data):
         self.assertIndexMembers(engine, 'fixed_ips',
                                 'fixed_ips_deleted_allocated_updated_at_idx',
                                 ['deleted', 'allocated', 'updated_at'])
 
-    def _post_downgrade_277(self, engine):
-        self.assertIndexNotExists(engine, 'fixed_ips',
-                                  'fixed_ips_deleted_allocated_updated_at_idx')
-
     def _check_278(self, engine, data):
         compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
         self.assertEqual(0, len([fk for fk in compute_nodes.foreign_keys
                                  if fk.parent.name == 'service_id']))
         self.assertTrue(compute_nodes.c.service_id.nullable)
 
-    def _post_downgrade_278(self, engine):
-        compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
-        service_id_fks = [fk for fk in compute_nodes.foreign_keys
-                          if fk.parent.name == 'service_id'
-                          and fk.column.name == 'id']
-        self.assertEqual(1, len(service_id_fks))
-        self.assertFalse(compute_nodes.c.service_id.nullable)
-        if engine.name == 'postgresql':
-            # Only make sure that posgresql at least adds a name for the FK
-            self.assertIsNotNone(service_id_fks[0].name)
-        elif engine.name != 'sqlite':
-            # Erm, SQLA<1.0 doesn't return FK names for sqlite so we need to
-            # check only for other engines
-            self.assertEqual('fk_compute_nodes_service_id',
-                             service_id_fks[0].name)
-
     def _check_279(self, engine, data):
         inspector = reflection.Inspector.from_engine(engine)
         constraints = inspector.get_unique_constraints('compute_nodes')
@@ -864,23 +665,10 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
         self.assertIn('uniq_compute_nodes0host0hypervisor_hostname0deleted',
                       constraint_names)
 
-    def _post_downgrade_279(self, engine):
-        inspector = reflection.Inspector.from_engine(engine)
-        constraints = inspector.get_unique_constraints('compute_nodes')
-        constraint_names = [constraint['name'] for constraint in constraints]
-        self.assertNotIn('uniq_compute_nodes0host0hypervisor_hostname0deleted',
-                         constraint_names)
-        self.assertIn('uniq_compute_nodes0host0hypervisor_hostname',
-                      constraint_names)
-
     def _check_280(self, engine, data):
         key_pairs = oslodbutils.get_table(engine, 'key_pairs')
         self.assertFalse(key_pairs.c.name.nullable)
 
-    def _post_downgrade_280(self, engine):
-        key_pairs = oslodbutils.get_table(engine, 'key_pairs')
-        self.assertTrue(key_pairs.c.name.nullable)
-
 
 class TestNovaMigrationsSQLite(NovaMigrationsCheckers,
                                test_base.DbTestCase,
@@ -922,12 +710,12 @@ class TestNovaMigrationsPostgreSQL(NovaMigrationsCheckers,
 
 
 class ProjectTestCase(test.NoDBTestCase):
 
-    def test_all_migrations_have_downgrade(self):
+    def test_no_migrations_have_downgrade(self):
         topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
         py_glob = os.path.join(topdir, "nova", "db", "sqlalchemy",
                                "migrate_repo", "versions", "*.py")
-        missing_downgrade = []
+        includes_downgrade = []
         for path in glob.iglob(py_glob):
             has_upgrade = False
             has_downgrade = False
@@ -938,10 +726,11 @@ class ProjectTestCase(test.NoDBTestCase):
                 if 'def downgrade(' in line:
                     has_downgrade = True
 
-            if has_upgrade and not has_downgrade:
+            if has_upgrade and has_downgrade:
                 fname = os.path.basename(path)
-                missing_downgrade.append(fname)
+                includes_downgrade.append(fname)
 
-        helpful_msg = ("The following migrations are missing a downgrade:"
-                       "\n\t%s" % '\n\t'.join(sorted(missing_downgrade)))
-        self.assertFalse(missing_downgrade, helpful_msg)
+        helpful_msg = ("The following migrations have a downgrade "
+                       "which is not supported:"
+                       "\n\t%s" % '\n\t'.join(sorted(includes_downgrade)))
+        self.assertFalse(includes_downgrade, helpful_msg)
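
For readability, here is how the new prohibition test should read once the hunks
above are applied. This is reassembled from the added and kept lines of the
diff; the file-scanning loop in the middle is inferred from the surrounding
context lines, so its exact form and whitespace are approximate:

    def test_no_migrations_have_downgrade(self):
        topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
        py_glob = os.path.join(topdir, "nova", "db", "sqlalchemy",
                               "migrate_repo", "versions", "*.py")
        includes_downgrade = []
        for path in glob.iglob(py_glob):
            has_upgrade = False
            has_downgrade = False
            # inferred: scan each migration module for upgrade/downgrade defs
            with open(path, "r") as f:
                for line in f:
                    if 'def upgrade(' in line:
                        has_upgrade = True
                    if 'def downgrade(' in line:
                        has_downgrade = True

            if has_upgrade and has_downgrade:
                fname = os.path.basename(path)
                includes_downgrade.append(fname)

        helpful_msg = ("The following migrations have a downgrade "
                       "which is not supported:"
                       "\n\t%s" % '\n\t'.join(sorted(includes_downgrade)))
        self.assertFalse(includes_downgrade, helpful_msg)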