Remove downgrade migrations

According to the cross-project spec [1], downgrade migrations should be removed.

[1] I622f89fe63327d44f9b229d3bd9e76e15acbaa7a

Implements blueprint: no-downward-sql-migration

Change-Id: I111cdb4bba361de5da0ce7db8144965c947ada41
This commit is contained in:
Ivan Kolodyazhny 2015-12-17 13:15:57 +02:00
parent 0acf5ab4aa
commit 6d678dc393
74 changed files with 32 additions and 1577 deletions

View File

@ -24,6 +24,9 @@ from oslo_db import options
from stevedore import driver
from cinder.db.sqlalchemy import api as db_api
from cinder import exception
from cinder.i18n import _
INIT_VERSION = 000
@ -55,6 +58,15 @@ def db_sync(version=None, init_version=INIT_VERSION, engine=None):
if engine is None:
engine = db_api.get_engine()
current_db_version = get_backend().db_version(engine,
MIGRATE_REPO_PATH,
init_version)
# TODO(e0ne): drop version validation when new oslo.db will be released
if version and int(version) < current_db_version:
msg = _('Database schema downgrade is not allowed.')
raise exception.InvalidInput(reason=msg)
return get_backend().db_sync(engine=engine,
abs_path=MIGRATE_REPO_PATH,
version=version,

View File

@ -256,12 +256,3 @@ def upgrade(migrate_engine):
"ALTER DATABASE %s DEFAULT CHARACTER SET utf8" %
migrate_engine.url.database)
migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" % table)
def downgrade(migrate_engine):
    """Drop every table created by upgrade(), newest-first.

    Reversing the creation order drops dependent tables before the
    tables they reference, keeping foreign-key constraints satisfied.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    for table in reversed(define_tables(meta)):
        table.drop()

View File

@ -12,7 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import MetaData, Integer, String, Table, ForeignKey
@ -88,37 +87,3 @@ def upgrade(migrate_engine):
)
reservations.create()
def downgrade(migrate_engine):
    """Drop the quota tables created by upgrade().

    The reservations table carries a foreign key on quota_usages.id, so
    on MySQL/PostgreSQL that constraint must be dropped before
    quota_usages can be removed.  SQLite needs no explicit FK drop.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    # Constraint names are backend specific; None means "nothing to drop".
    fk_name = None
    if migrate_engine.name == 'mysql':
        fk_name = 'reservations_ibfk_1'
    elif migrate_engine.name == 'postgresql':
        fk_name = 'reservations_usage_id_fkey'

    # NOTE: MySQL and PostgreSQL cannot drop the quota_usages table
    # until the foreign key is removed.  We remove the foreign key first,
    # and then we drop the table.
    if fk_name:
        table = Table('reservations', meta, autoload=True)
        # Bug fix: the constraint references quota_usages.id, so the
        # referenced table is quota_usages, not reservations.
        ref_table = Table('quota_usages', meta, autoload=True)
        params = {'columns': [table.c['usage_id']],
                  'refcolumns': [ref_table.c['id']],
                  'name': fk_name}
        fkey = ForeignKeyConstraint(**params)
        fkey.drop()

    quota_classes = Table('quota_classes', meta, autoload=True)
    quota_classes.drop()

    quota_usages = Table('quota_usages', meta, autoload=True)
    quota_usages.drop()

    reservations = Table('reservations', meta, autoload=True)
    reservations.drop()

View File

@ -53,12 +53,3 @@ def upgrade(migrate_engine):
except Exception:
meta.drop_all(tables=[volume_glance_metadata])
raise
def downgrade(migrate_engine):
    """Drop the volume_glance_metadata table added by upgrade()."""
    meta = MetaData(bind=migrate_engine)
    Table('volume_glance_metadata', meta, autoload=True).drop()

View File

@ -13,7 +13,7 @@
import uuid
from migrate import ForeignKeyConstraint
from sqlalchemy import Integer, MetaData, String, Table
from sqlalchemy import MetaData, String, Table
def upgrade(migrate_engine):
@ -79,81 +79,3 @@ def upgrade(migrate_engine):
pass
else:
raise
def downgrade(migrate_engine):
    """Convert volume_type from UUID back to int.

    Works in four ordered phases: (1) drop every FK that touches the
    type-id columns, (2) rewrite the UUID values to small sequential
    ints, (3) retype the columns to INTEGER, (4) re-create the FKs.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    volumes = Table('volumes', meta, autoload=True)
    volume_types = Table('volume_types', meta, autoload=True)
    extra_specs = Table('volume_type_extra_specs', meta, autoload=True)
    # Columns whose foreign keys must be removed before the type change.
    fkey_remove_list = [volumes.c.volume_type_id,
                        volume_types.c.id,
                        extra_specs.c.volume_type_id]
    for column in fkey_remove_list:
        fkeys = list(column.foreign_keys)
        if fkeys:
            fkey_name = fkeys[0].constraint.name
            fkey = ForeignKeyConstraint(columns=[column],
                                        refcolumns=[volume_types.c.id],
                                        name=fkey_name)
            try:
                fkey.drop()
            except Exception:
                # SQLite cannot drop FKs; treat that as a no-op.
                if migrate_engine.url.get_dialect().name.startswith('sqlite'):
                    pass
                else:
                    raise
    # Renumber each type: referencing rows first, then the type row
    # itself, so lookups by the old UUID still match mid-loop.
    vtype_list = list(volume_types.select().execute())
    new_id = 1
    for t in vtype_list:
        volumes.update().\
            where(volumes.c.volume_type_id == t['id']).\
            values(volume_type_id=new_id).execute()
        extra_specs.update().\
            where(extra_specs.c.volume_type_id == t['id']).\
            values(volume_type_id=new_id).execute()
        volume_types.update().\
            where(volume_types.c.id == t['id']).\
            values(id=new_id).execute()
        new_id += 1
    if migrate_engine.name == 'postgresql':
        # NOTE(e0ne): PostgreSQL can't cast string to int automatically
        table_column_pairs = [('volumes', 'volume_type_id'),
                              ('volume_types', 'id'),
                              ('volume_type_extra_specs', 'volume_type_id')]
        sql = 'ALTER TABLE {0} ALTER COLUMN {1} ' + \
              'TYPE INTEGER USING {1}::numeric'
        for table, column in table_column_pairs:
            migrate_engine.execute(sql.format(table, column))
    else:
        volumes.c.volume_type_id.alter(Integer)
        volume_types.c.id.alter(Integer)
        extra_specs.c.volume_type_id.alter(Integer)
    # Phase 4: restore the FKs dropped above (same SQLite caveat).
    for column in fkey_remove_list:
        fkeys = list(column.foreign_keys)
        if fkeys:
            fkey_name = fkeys[0].constraint.name
            fkey = ForeignKeyConstraint(columns=[column],
                                        refcolumns=[volume_types.c.id],
                                        name=fkey_name)
            try:
                fkey.create()
            except Exception:
                if migrate_engine.url.get_dialect().name.startswith('sqlite'):
                    pass
                else:
                    raise

View File

@ -23,13 +23,3 @@ def upgrade(migrate_engine):
source_volid = Column('source_volid', String(36))
volumes.create_column(source_volid)
volumes.update().values(source_volid=None).execute()
def downgrade(migrate_engine):
    """Drop the source_volid column from the volumes table."""
    meta = MetaData(bind=migrate_engine)
    volumes = Table('volumes', meta, autoload=True)
    volumes.drop_column(Column('source_volid', String(36)))

View File

@ -1,121 +0,0 @@
-- SQLite downgrade helper: SQLite has no DROP COLUMN, so removing
-- source_volid means copying volumes aside, re-creating it without the
-- column, and copying the data back.
CREATE TEMPORARY TABLE volumes_backup (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id VARCHAR(36) NOT NULL,
    ec2_id VARCHAR(255),
    user_id VARCHAR(255),
    project_id VARCHAR(255),
    host VARCHAR(255),
    size INTEGER,
    availability_zone VARCHAR(255),
    instance_uuid VARCHAR(36),
    mountpoint VARCHAR(255),
    attach_time VARCHAR(255),
    status VARCHAR(255),
    attach_status VARCHAR(255),
    scheduled_at DATETIME,
    launched_at DATETIME,
    terminated_at DATETIME,
    display_name VARCHAR(255),
    display_description VARCHAR(255),
    provider_location VARCHAR(256),
    provider_auth VARCHAR(256),
    snapshot_id VARCHAR(36),
    volume_type_id VARCHAR(36),
    source_volid VARCHAR(36),
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1))
);

INSERT INTO volumes_backup
    SELECT created_at,
           updated_at,
           deleted_at,
           deleted,
           id,
           ec2_id,
           user_id,
           project_id,
           host,
           size,
           availability_zone,
           instance_uuid,
           mountpoint,
           attach_time,
           status,
           attach_status,
           scheduled_at,
           launched_at,
           terminated_at,
           display_name,
           display_description,
           provider_location,
           provider_auth,
           snapshot_id,
           volume_type_id,
           source_volid
    FROM volumes;

DROP TABLE volumes;

-- Same schema as before, minus source_volid.
CREATE TABLE volumes (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id VARCHAR(36) NOT NULL,
    ec2_id VARCHAR(255),
    user_id VARCHAR(255),
    project_id VARCHAR(255),
    host VARCHAR(255),
    size INTEGER,
    availability_zone VARCHAR(255),
    instance_uuid VARCHAR(36),
    mountpoint VARCHAR(255),
    attach_time VARCHAR(255),
    status VARCHAR(255),
    attach_status VARCHAR(255),
    scheduled_at DATETIME,
    launched_at DATETIME,
    terminated_at DATETIME,
    display_name VARCHAR(255),
    display_description VARCHAR(255),
    provider_location VARCHAR(256),
    provider_auth VARCHAR(256),
    snapshot_id VARCHAR(36),
    volume_type_id VARCHAR(36),
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1))
);

INSERT INTO volumes
    SELECT created_at,
           updated_at,
           deleted_at,
           deleted,
           id,
           ec2_id,
           user_id,
           project_id,
           host,
           size,
           availability_zone,
           instance_uuid,
           mountpoint,
           attach_time,
           status,
           attach_status,
           scheduled_at,
           launched_at,
           terminated_at,
           display_name,
           display_description,
           provider_location,
           provider_auth,
           snapshot_id,
           volume_type_id
    FROM volumes_backup;

DROP TABLE volumes_backup;

View File

@ -23,12 +23,3 @@ def upgrade(migrate_engine):
provider_location = Column('provider_location', String(255))
snapshots.create_column(provider_location)
snapshots.update().values(provider_location=None).execute()
def downgrade(migrate_engine):
    """Drop the provider_location column from the snapshots table."""
    meta = MetaData(bind=migrate_engine)
    snapshots = Table('snapshots', meta, autoload=True)
    snapshots.drop_column(snapshots.columns.provider_location)

View File

@ -25,15 +25,3 @@ def upgrade(migrate_engine):
ForeignKeyConstraint(
columns=[snapshots.c.volume_id],
refcolumns=[volumes.c.id]).create()
def downgrade(migrate_engine):
    """Drop the snapshots.volume_id -> volumes.id foreign key."""
    meta = MetaData(bind=migrate_engine)
    snapshots = Table('snapshots', meta, autoload=True)
    volumes = Table('volumes', meta, autoload=True)
    fkey = ForeignKeyConstraint(columns=[snapshots.c.volume_id],
                                refcolumns=[volumes.c.id])
    fkey.drop()

View File

@ -1,28 +0,0 @@
-- As sqlite does not support the DROP FOREIGN KEY, we need to create
-- the table, and move all the data to it.
-- (Rebuild snapshots without the volume_id FK, then swap it in.)
CREATE TABLE snapshots_v6 (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id VARCHAR(36) NOT NULL,
    volume_id VARCHAR(36) NOT NULL,
    user_id VARCHAR(255),
    project_id VARCHAR(255),
    status VARCHAR(255),
    progress VARCHAR(255),
    volume_size INTEGER,
    scheduled_at DATETIME,
    display_name VARCHAR(255),
    display_description VARCHAR(255),
    provider_location VARCHAR(255),
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1))
);

INSERT INTO snapshots_v6 SELECT * FROM snapshots;

DROP TABLE snapshots;

ALTER TABLE snapshots_v6 RENAME TO snapshots;

View File

@ -47,11 +47,3 @@ def upgrade(migrate_engine):
)
backups.create()
def downgrade(migrate_engine):
    """Drop the backups table created by upgrade()."""
    meta = MetaData(bind=migrate_engine)
    Table('backups', meta, autoload=True).drop()

View File

@ -36,12 +36,3 @@ def upgrade(migrate_engine):
)
snapshot_metadata.create()
def downgrade(migrate_engine):
    """Drop the snapshot_metadata table created by upgrade()."""
    meta = MetaData(bind=migrate_engine)
    Table('snapshot_metadata', meta, autoload=True).drop()

View File

@ -39,12 +39,3 @@ def upgrade(migrate_engine):
)
transfers.create()
def downgrade(migrate_engine):
    """Drop the transfers table created by upgrade()."""
    meta = MetaData(bind=migrate_engine)
    Table('transfers', meta, autoload=True).drop()

View File

@ -31,13 +31,3 @@ def upgrade(migrate_engine):
volumes.update().\
where(volumes.c.id == item['volume_id']).\
values(bootable=True).execute()
def downgrade(migrate_engine):
    """Drop the bootable column from the volumes table."""
    meta = MetaData(bind=migrate_engine)
    volumes = Table('volumes', meta, autoload=True)
    volumes.drop_column(volumes.columns.bootable)

View File

@ -1,61 +0,0 @@
-- SQLite downgrade: rebuild volumes without the bootable column
-- (SQLite has no DROP COLUMN), then rename the copy into place.
CREATE TABLE volumes_v10 (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id VARCHAR(36) NOT NULL,
    ec2_id INTEGER,
    user_id VARCHAR(255),
    project_id VARCHAR(255),
    snapshot_id VARCHAR(36),
    host VARCHAR(255),
    size INTEGER,
    availability_zone VARCHAR(255),
    instance_uuid VARCHAR(36),
    mountpoint VARCHAR(255),
    attach_time VARCHAR(255),
    status VARCHAR(255),
    attach_status VARCHAR(255),
    scheduled_at DATETIME,
    launched_at DATETIME,
    terminated_at DATETIME,
    display_name VARCHAR(255),
    display_description VARCHAR(255),
    provider_location VARCHAR(255),
    provider_auth VARCHAR(255),
    volume_type_id VARCHAR(36),
    source_volid VARCHAR(36),
    PRIMARY KEY (id)
);

INSERT INTO volumes_v10
    SELECT created_at,
           updated_at,
           deleted_at,
           deleted,
           id,
           ec2_id,
           user_id,
           project_id,
           snapshot_id,
           host,
           size,
           availability_zone,
           instance_uuid,
           mountpoint,
           attach_time,
           status,
           attach_status,
           scheduled_at,
           launched_at,
           terminated_at,
           display_name,
           display_description,
           provider_location,
           provider_auth,
           volume_type_id,
           source_volid
    FROM volumes;

DROP TABLE volumes;

ALTER TABLE volumes_v10 RENAME TO volumes;

View File

@ -23,13 +23,3 @@ def upgrade(migrate_engine):
attached_host = Column('attached_host', String(255))
volumes.create_column(attached_host)
volumes.update().values(attached_host=None).execute()
def downgrade(migrate_engine):
    """Drop the attached_host column from the volumes table."""
    meta = MetaData(bind=migrate_engine)
    volumes = Table('volumes', meta, autoload=True)
    volumes.drop_column(Column('attached_host', String(255)))

View File

@ -1,63 +0,0 @@
-- SQLite downgrade: rebuild volumes without the attached_host column
-- (SQLite has no DROP COLUMN), then rename the copy into place.
CREATE TABLE volumes_v11 (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id VARCHAR(36) NOT NULL,
    ec2_id INTEGER,
    user_id VARCHAR(255),
    project_id VARCHAR(255),
    snapshot_id VARCHAR(36),
    host VARCHAR(255),
    size INTEGER,
    availability_zone VARCHAR(255),
    instance_uuid VARCHAR(36),
    mountpoint VARCHAR(255),
    attach_time VARCHAR(255),
    status VARCHAR(255),
    attach_status VARCHAR(255),
    scheduled_at DATETIME,
    launched_at DATETIME,
    terminated_at DATETIME,
    display_name VARCHAR(255),
    display_description VARCHAR(255),
    provider_location VARCHAR(255),
    provider_auth VARCHAR(255),
    volume_type_id VARCHAR(36),
    source_volid VARCHAR(36),
    bootable BOOLEAN,
    PRIMARY KEY (id)
);

INSERT INTO volumes_v11
    SELECT created_at,
           updated_at,
           deleted_at,
           deleted,
           id,
           ec2_id,
           user_id,
           project_id,
           snapshot_id,
           host,
           size,
           availability_zone,
           instance_uuid,
           mountpoint,
           attach_time,
           status,
           attach_status,
           scheduled_at,
           launched_at,
           terminated_at,
           display_name,
           display_description,
           provider_location,
           provider_auth,
           volume_type_id,
           source_volid,
           bootable
    FROM volumes;

DROP TABLE volumes;

ALTER TABLE volumes_v11 RENAME TO volumes;

View File

@ -23,13 +23,3 @@ def upgrade(migrate_engine):
provider_geometry = Column('provider_geometry', String(255))
volumes.create_column(provider_geometry)
volumes.update().values(provider_geometry=None).execute()
def downgrade(migrate_engine):
    """Drop the provider_geometry column from the volumes table."""
    meta = MetaData(bind=migrate_engine)
    volumes = Table('volumes', meta, autoload=True)
    volumes.drop_column(Column('provider_geometry', String(255)))

View File

@ -1,65 +0,0 @@
-- SQLite downgrade: rebuild volumes without the provider_geometry
-- column (SQLite has no DROP COLUMN), then rename the copy into place.
CREATE TABLE volumes_v12 (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id VARCHAR(36) NOT NULL,
    ec2_id INTEGER,
    user_id VARCHAR(255),
    project_id VARCHAR(255),
    snapshot_id VARCHAR(36),
    host VARCHAR(255),
    size INTEGER,
    availability_zone VARCHAR(255),
    instance_uuid VARCHAR(36),
    mountpoint VARCHAR(255),
    attach_time VARCHAR(255),
    status VARCHAR(255),
    attach_status VARCHAR(255),
    scheduled_at DATETIME,
    launched_at DATETIME,
    terminated_at DATETIME,
    display_name VARCHAR(255),
    display_description VARCHAR(255),
    provider_location VARCHAR(255),
    provider_auth VARCHAR(255),
    volume_type_id VARCHAR(36),
    source_volid VARCHAR(36),
    bootable BOOLEAN,
    attached_host VARCHAR(255),
    PRIMARY KEY (id)
);

INSERT INTO volumes_v12
    SELECT created_at,
           updated_at,
           deleted_at,
           deleted,
           id,
           ec2_id,
           user_id,
           project_id,
           snapshot_id,
           host,
           size,
           availability_zone,
           instance_uuid,
           mountpoint,
           attach_time,
           status,
           attach_status,
           scheduled_at,
           launched_at,
           terminated_at,
           display_name,
           display_description,
           provider_location,
           provider_auth,
           volume_type_id,
           source_volid,
           bootable,
           attached_host
    FROM volumes;

DROP TABLE volumes;

ALTER TABLE volumes_v12 RENAME TO volumes;

View File

@ -23,13 +23,3 @@ def upgrade(migrate_engine):
_name_id = Column('_name_id', String(36))
volumes.create_column(_name_id)
volumes.update().values(_name_id=None).execute()
def downgrade(migrate_engine):
    """Drop the _name_id column from the volumes table."""
    meta = MetaData(bind=migrate_engine)
    volumes = Table('volumes', meta, autoload=True)
    volumes.drop_column(volumes.columns._name_id)

View File

@ -1,67 +0,0 @@
-- SQLite downgrade: rebuild volumes without the _name_id column
-- (SQLite has no DROP COLUMN), then rename the copy into place.
CREATE TABLE volumes_v13 (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id VARCHAR(36) NOT NULL,
    ec2_id INTEGER,
    user_id VARCHAR(255),
    project_id VARCHAR(255),
    snapshot_id VARCHAR(36),
    host VARCHAR(255),
    size INTEGER,
    availability_zone VARCHAR(255),
    instance_uuid VARCHAR(36),
    attached_host VARCHAR(255),
    mountpoint VARCHAR(255),
    attach_time VARCHAR(255),
    status VARCHAR(255),
    attach_status VARCHAR(255),
    scheduled_at DATETIME,
    launched_at DATETIME,
    terminated_at DATETIME,
    display_name VARCHAR(255),
    display_description VARCHAR(255),
    provider_location VARCHAR(255),
    provider_auth VARCHAR(255),
    volume_type_id VARCHAR(36),
    source_volid VARCHAR(36),
    bootable BOOLEAN,
    provider_geometry VARCHAR(255),
    PRIMARY KEY (id)
);

INSERT INTO volumes_v13
    SELECT created_at,
           updated_at,
           deleted_at,
           deleted,
           id,
           ec2_id,
           user_id,
           project_id,
           snapshot_id,
           host,
           size,
           availability_zone,
           instance_uuid,
           attached_host,
           mountpoint,
           attach_time,
           status,
           attach_status,
           scheduled_at,
           launched_at,
           terminated_at,
           display_name,
           display_description,
           provider_location,
           provider_auth,
           volume_type_id,
           source_volid,
           bootable,
           provider_geometry
    FROM volumes;

DROP TABLE volumes;

ALTER TABLE volumes_v13 RENAME TO volumes;

View File

@ -10,8 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import MetaData, String, Table
from sqlalchemy import MetaData, Table
TABLE_NAME = 'migrations'
@ -22,29 +21,3 @@ def upgrade(migrate_engine):
meta.bind = migrate_engine
table = Table(TABLE_NAME, meta, autoload=True)
table.drop()
def downgrade(migrate_engine):
    """Re-create the legacy migrations table dropped by upgrade()."""
    meta = MetaData()
    meta.bind = migrate_engine
    table = Table(
        TABLE_NAME, meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('source_compute', String(length=255)),
        Column('dest_compute', String(length=255)),
        Column('dest_host', String(length=255)),
        Column('old_instance_type_id', Integer),
        Column('new_instance_type_id', Integer),
        Column('instance_uuid', String(length=255), nullable=True),
        Column('status', String(length=255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    # NOTE(review): data dropped by upgrade() is not restored — only the
    # empty schema comes back.
    table.create()

View File

@ -12,8 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, ForeignKey
from sqlalchemy import Integer, MetaData, String, Table
from sqlalchemy import MetaData, Table
def upgrade(migrate_engine):
@ -28,61 +27,3 @@ def upgrade(migrate_engine):
for table in tables:
table.drop()
def downgrade(migrate_engine):
    """Re-create the sm_* tables dropped by upgrade() (empty)."""
    meta = MetaData()
    meta.bind = migrate_engine
    # Autoload volumes so the sm_volume FK below can resolve volumes.id.
    Table('volumes', meta, autoload=True)
    sm_backend_config = Table(
        'sm_backend_config', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('flavor_id', Integer, ForeignKey('sm_flavors.id'),
               nullable=False),
        Column('sr_uuid', String(length=255)),
        Column('sr_type', String(length=255)),
        Column('config_params', String(length=2047)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    sm_flavors = Table(
        'sm_flavors', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('label', String(length=255)),
        Column('description', String(length=255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    sm_volume = Table(
        'sm_volume', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', String(length=36),
               ForeignKey('volumes.id'),
               primary_key=True,
               nullable=False),
        Column('backend_id', Integer, ForeignKey('sm_backend_config.id'),
               nullable=False),
        Column('vdi_uuid', String(length=255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    # Creation order matters: sm_flavors before sm_backend_config before
    # sm_volume, following the foreign-key dependencies.
    tables = [sm_flavors, sm_backend_config, sm_volume]
    for table in tables:
        table.create()

View File

@ -58,20 +58,3 @@ def upgrade(migrate_engine):
)
encryption.create()
def downgrade(migrate_engine):
    """Remove volume encryption support added by upgrade()."""
    meta = MetaData(bind=migrate_engine)
    # drop encryption key UUID for volumes
    volumes = Table('volumes', meta, autoload=True)
    volumes.c.encryption_key_id.drop()
    # drop encryption key UUID and volume type id for snapshots
    snapshots = Table('snapshots', meta, autoload=True)
    snapshots.c.encryption_key_id.drop()
    snapshots.c.volume_type_id.drop()
    # drop encryption types table
    encryption = Table('encryption', meta, autoload=True)
    encryption.drop()

View File

@ -16,7 +16,6 @@
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import ForeignKey, MetaData, String, Table
from migrate import ForeignKeyConstraint
def upgrade(migrate_engine):
@ -47,30 +46,3 @@ def upgrade(migrate_engine):
volume_types.create_column(qos_specs_id)
volume_types.update().values(qos_specs_id=None).execute()
def downgrade(migrate_engine):
    """Drop the quality_of_service_specs table and the
    volume_types.qos_specs_id column added by upgrade().
    """
    meta = MetaData()
    meta.bind = migrate_engine
    qos_specs = Table('quality_of_service_specs', meta, autoload=True)
    if migrate_engine.name == 'mysql':
        # NOTE(alanmeadows): MySQL Cannot drop column qos_specs_id
        # until the foreign key volumes_types_ibfk_1 is removed. We
        # remove the foreign key first, and then we drop the column.
        table = Table('volume_types', meta, autoload=True)
        # NOTE(review): ref_table looks like it should be
        # quality_of_service_specs (the FK target), not volume_types;
        # the drop works regardless because it goes by constraint name.
        ref_table = Table('volume_types', meta, autoload=True)
        params = {'columns': [table.c['qos_specs_id']],
                  'refcolumns': [ref_table.c['id']],
                  'name': 'volume_types_ibfk_1'}
        fkey = ForeignKeyConstraint(**params)
        fkey.drop()
    volume_types = Table('volume_types', meta, autoload=True)
    qos_specs_id = Column('qos_specs_id', String(36))
    volume_types.drop_column(qos_specs_id)
    qos_specs.drop()

View File

@ -24,13 +24,3 @@ def upgrade(migrate_engine):
volumes = Table('volumes', meta, autoload=True)
migration_status = Column('migration_status', String(255))
volumes.create_column(migration_status)
def downgrade(migrate_engine):
    """Drop the migration_status column from the volumes table."""
    meta = MetaData(bind=migrate_engine)
    volumes = Table('volumes', meta, autoload=True)
    volumes.drop_column(volumes.columns.migration_status)

View File

@ -37,12 +37,3 @@ def upgrade(migrate_engine):
)
volume_admin_metadata.create()
def downgrade(migrate_engine):
    """Drop the volume_admin_metadata table created by upgrade()."""
    meta = MetaData(bind=migrate_engine)
    Table('volume_admin_metadata', meta, autoload=True).drop()

View File

@ -64,13 +64,3 @@ def upgrade(migrate_engine):
'resource': 'gigabytes',
'hard_limit': CONF.quota_gigabytes,
'deleted': False, })
def downgrade(migrate_engine):
    """Don't delete the 'default' quota entries at downgrade time.

    We don't know if the user had default entries when we started.
    If they did, we wouldn't want to remove them.  So, the safest
    thing to do is just leave the 'default' entries at downgrade time.
    """
    pass

View File

@ -21,10 +21,3 @@ def upgrade(migrate_engine):
services = Table('services', meta, autoload=True)
reason = Column('disabled_reason', String(255))
services.create_column(reason)
def downgrade(migrate_engine):
    """Drop the disabled_reason column from the services table."""
    meta = MetaData(bind=migrate_engine)
    services = Table('services', meta, autoload=True)
    services.drop_column('disabled_reason')

View File

@ -36,14 +36,3 @@ def upgrade(migrate_engine):
reservations.c.deleted, reservations.c.expire)
index.create(migrate_engine)
def downgrade(migrate_engine):
    """Drop the (deleted, expire) index from reservations, if present."""
    meta = MetaData()
    meta.bind = migrate_engine
    reservations = Table('reservations', meta, autoload=True)
    # Helper defined elsewhere in this module; returns None when the
    # index does not exist, so the drop is safely skipped.
    index = _get_deleted_expire_index(reservations)
    if index:
        index.drop(migrate_engine)

View File

@ -32,16 +32,3 @@ def upgrade(migrate_engine):
volumes.update().values(replication_status='disabled',
replication_extended_status=None,
replication_driver_data=None).execute()
def downgrade(migrate_engine):
    """Drop the three replication columns from the volumes table."""
    meta = MetaData(bind=migrate_engine)
    volumes = Table('volumes', meta, autoload=True)
    for column_name in ('replication_status',
                        'replication_extended_status',
                        'replication_driver_data'):
        volumes.drop_column(volumes.columns[column_name])

View File

@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import ForeignKey, MetaData, String, Table
@ -80,52 +79,3 @@ def upgrade(migrate_engine):
snapshots.create_column(cgsnapshot_id)
snapshots.update().values(cgsnapshot_id=None).execute()
def downgrade(migrate_engine):
    """Remove consistency-group support added by upgrade().

    Drops snapshots.cgsnapshot_id and volumes.consistencygroup_id (with
    their MySQL FKs first), then the cgsnapshots and consistencygroups
    tables, in dependency order.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    # Drop column from snapshots table
    if migrate_engine.name == 'mysql':
        # MySQL cannot drop column cgsnapshot_id until the foreign key
        # constraint is removed. So remove the foreign key first, and
        # then drop the column.
        table = Table('snapshots', meta, autoload=True)
        # NOTE(review): ref_table looks like it should be cgsnapshots
        # (the FK target); the drop works anyway since it goes by name.
        ref_table = Table('snapshots', meta, autoload=True)
        params = {'columns': [table.c['cgsnapshot_id']],
                  'refcolumns': [ref_table.c['id']],
                  'name': 'snapshots_ibfk_1'}
        fkey = ForeignKeyConstraint(**params)
        fkey.drop()
    snapshots = Table('snapshots', meta, autoload=True)
    cgsnapshot_id = snapshots.columns.cgsnapshot_id
    snapshots.drop_column(cgsnapshot_id)
    # Drop column from volumes table
    if migrate_engine.name == 'mysql':
        # MySQL cannot drop column consistencygroup_id until the foreign
        # key constraint is removed. So remove the foreign key first,
        # and then drop the column.
        table = Table('volumes', meta, autoload=True)
        ref_table = Table('volumes', meta, autoload=True)
        params = {'columns': [table.c['consistencygroup_id']],
                  'refcolumns': [ref_table.c['id']],
                  'name': 'volumes_ibfk_1'}
        fkey = ForeignKeyConstraint(**params)
        fkey.drop()
    volumes = Table('volumes', meta, autoload=True)
    consistencygroup_id = volumes.columns.consistencygroup_id
    volumes.drop_column(consistencygroup_id)
    # Drop table
    cgsnapshots = Table('cgsnapshots', meta, autoload=True)
    cgsnapshots.drop()
    # Drop table
    consistencygroups = Table('consistencygroups', meta, autoload=True)
    consistencygroups.drop()

View File

@ -50,13 +50,3 @@ def upgrade(migrate_engine):
'resource': 'consistencygroups',
'hard_limit': CONF.quota_consistencygroups,
'deleted': False, })
def downgrade(migrate_engine):
    """Don't delete the 'default' quota entries at downgrade time.

    We don't know if the user had default entries when we started.
    If they did, we wouldn't want to remove them.  So, the safest
    thing to do is just leave the 'default' entries at downgrade time.
    """
    pass

View File

@ -17,7 +17,3 @@
def upgrade(migrate_engine):
    """Placeholder migration: no schema change in this revision."""
    pass


def downgrade(migration_engine):
    """Placeholder migration: no schema change in this revision."""
    pass

View File

@ -17,7 +17,3 @@
def upgrade(migrate_engine):
    """Placeholder migration: no schema change in this revision."""
    pass


def downgrade(migration_engine):
    """Placeholder migration: no schema change in this revision."""
    pass

View File

@ -17,7 +17,3 @@
def upgrade(migrate_engine):
    """Placeholder migration: no schema change in this revision."""
    pass


def downgrade(migration_engine):
    """Placeholder migration: no schema change in this revision."""
    pass

View File

@ -17,7 +17,3 @@
def upgrade(migrate_engine):
    """Placeholder migration: no schema change in this revision."""
    pass


def downgrade(migration_engine):
    """Placeholder migration: no schema change in this revision."""
    pass

View File

@ -17,7 +17,3 @@
def upgrade(migrate_engine):
    """Placeholder migration: no schema change in this revision."""
    pass


def downgrade(migration_engine):
    """Placeholder migration: no schema change in this revision."""
    pass

View File

@ -39,15 +39,3 @@ def upgrade(migrate_engine):
)
volume_type_projects.create()
def downgrade(migrate_engine):
    """Drop volume_types.is_public and the volume_type_projects table."""
    meta = MetaData(bind=migrate_engine)
    volume_types = Table('volume_types', meta, autoload=True)
    volume_types.drop_column(volume_types.columns.is_public)
    Table('volume_type_projects', meta, autoload=True).drop()

View File

@ -1,29 +0,0 @@
-- As sqlite does not support the DROP CHECK, we need to create
-- the table, and move all the data to it.
-- (Rebuild volume_types without is_public, then drop the helper table.)
CREATE TABLE volume_types_v31 (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id VARCHAR(36) NOT NULL,
    name VARCHAR(255),
    qos_specs_id VARCHAR(36),
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1)),
    FOREIGN KEY(qos_specs_id) REFERENCES quality_of_service_specs (id)
);

INSERT INTO volume_types_v31
    SELECT created_at,
           updated_at,
           deleted_at,
           deleted,
           id,
           name,
           qos_specs_id
    FROM volume_types;

DROP TABLE volume_types;

ALTER TABLE volume_types_v31 RENAME TO volume_types;

DROP TABLE volume_type_projects;

View File

@ -62,27 +62,6 @@ def upgrade(migrate_engine):
pkey.create()
def downgrade(migrate_engine):
    """Restore volume_type_id as the primary key of encryption.

    Drops the encryption_id surrogate PK added by upgrade(), makes
    volume_type_id the primary key again, and re-creates its FK to
    volume_types.id.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    encryptions = Table('encryption', meta, autoload=True)
    encryption_id_pk = PrimaryKeyConstraint(encryptions.columns.encryption_id)
    encryption_id_pk.drop()
    encryptions.drop_column(encryptions.columns.encryption_id)
    volume_type_pk = PrimaryKeyConstraint(encryptions.columns.volume_type_id)
    volume_type_pk.create()
    ref_table = Table('volume_types', meta, autoload=True)
    params = {'columns': [encryptions.c['volume_type_id']],
              'refcolumns': [ref_table.c['id']],
              'name': 'encryption_ibfk_1'}
    volume_type_fk = ForeignKeyConstraint(**params)
    volume_type_fk.create()
def _upgrade_sqlite(meta, encryptions):
new_encryptions = Table(
'encryption_33', meta,

View File

@ -1,28 +0,0 @@
-- SQLite downgrade: rebuild encryption with volume_type_id as the
-- primary key (SQLite cannot alter primary keys in place).
CREATE TABLE encryption_v32 (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    cipher VARCHAR(255),
    control_location VARCHAR(255),
    key_size INTEGER,
    provider VARCHAR(255),
    volume_type_id VARCHAR(36),
    PRIMARY KEY (volume_type_id),
    FOREIGN KEY(volume_type_id) REFERENCES volume_types(id)
);

INSERT INTO encryption_v32
    SELECT created_at,
           updated_at,
           deleted_at,
           deleted,
           cipher,
           control_location,
           key_size,
           provider,
           volume_type_id
    FROM encryption;

DROP TABLE encryption;

ALTER TABLE encryption_v32 RENAME TO encryption;

View File

@ -1,25 +0,0 @@
-- SQLite downgrade: rebuild volume_types without the description
-- column (SQLite has no DROP COLUMN), then rename the copy into place.
CREATE TABLE volume_types_v33 (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id VARCHAR(36) NOT NULL,
    name VARCHAR(255),
    is_public BOOLEAN,
    qos_specs_id VARCHAR(36),
    PRIMARY KEY (id)
);

INSERT INTO volume_types_v33
    SELECT created_at,
           updated_at,
           deleted_at,
           deleted,
           id,
           name,
           is_public,
           qos_specs_id
    FROM volume_types;

DROP TABLE volume_types;

ALTER TABLE volume_types_v33 RENAME TO volume_types;

View File

@ -23,13 +23,3 @@ def upgrade(migrate_engine):
description = Column('description', String(255))
volume_types.create_column(description)
volume_types.update().values(description=None).execute()
def downgrade(migrate_engine):
    """Drop the description column from the volume_types table."""
    meta = MetaData(bind=migrate_engine)
    volume_types = Table('volume_types', meta, autoload=True)
    volume_types.drop_column(volume_types.columns.description)

View File

@ -23,13 +23,3 @@ def upgrade(migrate_engine):
provider_id = Column('provider_id', String(255))
volumes.create_column(provider_id)
volumes.update().values(provider_id=None).execute()
def downgrade(migrate_engine):
    """Drop the provider_id column from the volumes table."""
    meta = MetaData(bind=migrate_engine)
    volumes = Table('volumes', meta, autoload=True)
    volumes.drop_column(volumes.columns.provider_id)

View File

@ -23,13 +23,3 @@ def upgrade(migrate_engine):
provider_id = Column('provider_id', String(255))
snapshots.create_column(provider_id)
snapshots.update().values(provider_id=None).execute()
def downgrade(migrate_engine):
    """Drop the provider_id column from the snapshots table."""
    meta = MetaData(bind=migrate_engine)
    snapshots = Table('snapshots', meta, autoload=True)
    snapshots.drop_column(snapshots.columns.provider_id)

View File

@ -24,14 +24,3 @@ def upgrade(migrate_engine):
consistencygroups.create_column(cgsnapshot_id)
consistencygroups.update().values(cgsnapshot_id=None).execute()
def downgrade(migrate_engine):
    """Drop the cgsnapshot_id column from consistencygroups."""
    meta = MetaData(bind=migrate_engine)
    consistencygroups = Table('consistencygroups', meta, autoload=True)
    consistencygroups.drop_column(consistencygroups.columns.cgsnapshot_id)

View File

@ -34,11 +34,3 @@ def upgrade(migrate_engine):
)
initiator_data.create()
def downgrade(migrate_engine):
    """Drop the driver_initiator_data table created by upgrade()."""
    meta = MetaData(bind=migrate_engine)
    Table('driver_initiator_data', meta, autoload=True).drop()

View File

@ -26,13 +26,3 @@ def upgrade(migrate_engine):
backups.create_column(parent_id)
backups.update().values(parent_id=None).execute()
def downgrade(migrate_engine):
    """Drop the parent_id column that the upgrade added to backups."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    backups = Table('backups', metadata, autoload=True)
    backups.drop_column(backups.columns.parent_id)

View File

@ -86,48 +86,3 @@ def upgrade(migrate_engine):
volumes.drop_column(attach_time)
attached_host = volumes.columns.attached_host
volumes.drop_column(attached_host)
def downgrade(migrate_engine):
    """Remove volume_attachment table.

    Restores the pre-upgrade single-attach columns on ``volumes``,
    copies each volume's attachment data back from
    ``volume_attachment``, then drops that table.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    # Put the needed volumes table columns back
    volumes = Table('volumes', meta, autoload=True)
    # multiattach only exists with the volume_attachment table; drop it.
    multiattach = volumes.columns.multiattach
    volumes.drop_column(multiattach)
    # Recreate each legacy attach column and initialise it to NULL.
    attached_host = Column('attached_host', String(length=255))
    volumes.create_column(attached_host)
    volumes.update().values(attached_host=None).execute()
    attach_time = Column('attach_time', String(length=255))
    volumes.create_column(attach_time)
    volumes.update().values(attach_time=None).execute()
    instance_uuid = Column('instance_uuid', String(length=36))
    volumes.create_column(instance_uuid)
    volumes.update().values(instance_uuid=None).execute()
    mountpoint = Column('mountpoint', String(length=255))
    volumes.create_column(mountpoint)
    volumes.update().values(mountpoint=None).execute()
    # Copy live (not soft-deleted) attachment rows back onto volumes.
    volume_attachment = Table('volume_attachment', meta, autoload=True)
    attachments = list(volume_attachment.select().execute())
    for attachment in attachments:
        # we are going to lose data here for
        # multiple attaches. We'll migrate and the
        # last update wins.
        if not attachment.deleted_at:
            volume_id = attachment.volume_id
            volumes.update().\
                where(volumes.c.id == volume_id).\
                values(mountpoint=attachment.mountpoint,
                       attached_host=attachment.attached_host,
                       attach_time=attachment.attach_time,
                       instance_uuid=attachment.instance_uuid).\
                execute()
    volume_attachment.drop()

View File

@ -1,84 +0,0 @@
-- SQLite downgrade script for the volume_attachment migration.
-- SQLite's ALTER TABLE cannot drop columns, so the pre-upgrade volumes
-- schema is rebuilt as a new table and the data is copied across.
CREATE TABLE volumes_v39 (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id VARCHAR(36) NOT NULL,
    ec2_id INTEGER,
    user_id VARCHAR(255),
    project_id VARCHAR(255),
    snapshot_id VARCHAR(36),
    host VARCHAR(255),
    size INTEGER,
    availability_zone VARCHAR(255),
    status VARCHAR(255),
    attach_status VARCHAR(255),
    scheduled_at DATETIME,
    launched_at DATETIME,
    terminated_at DATETIME,
    display_name VARCHAR(255),
    display_description VARCHAR(255),
    provider_location VARCHAR(255),
    provider_auth VARCHAR(255),
    volume_type_id VARCHAR(36),
    source_volid VARCHAR(36),
    bootable INTEGER,
    provider_geometry VARCHAR(255),
    _name_id VARCHAR(36),
    encryption_key_id VARCHAR(36),
    migration_status VARCHAR(255),
    attached_host VARCHAR(255),
    attach_time VARCHAR(255),
    instance_uuid VARCHAR(36),
    mountpoint VARCHAR(255),
    consistencygroup_id VARCHAR(36),
    replication_status VARCHAR(255),
    replication_extended_status VARCHAR(255),
    replication_driver_data VARCHAR(255),
    PRIMARY KEY (id)
);
-- Fold each volume's attachment record back into the volumes row.
-- NOTE(review): a volume with several volume_attachment rows would yield
-- duplicate primary keys in this INSERT -- presumably downgrade is only
-- supported for single-attach data; confirm before relying on it.
INSERT INTO volumes_v39
    SELECT volumes.created_at,
        volumes.updated_at,
        volumes.deleted_at,
        volumes.deleted,
        volumes.id,
        volumes.ec2_id,
        volumes.user_id,
        volumes.project_id,
        volumes.snapshot_id,
        volumes.host,
        volumes.size,
        volumes.availability_zone,
        volumes.status,
        volumes.attach_status,
        volumes.scheduled_at,
        volumes.launched_at,
        volumes.terminated_at,
        volumes.display_name,
        volumes.display_description,
        volumes.provider_location,
        volumes.provider_auth,
        volumes.volume_type_id,
        volumes.source_volid,
        volumes.bootable,
        volumes.provider_geometry,
        volumes._name_id,
        volumes.encryption_key_id,
        volumes.migration_status,
        volume_attachment.attached_host,
        volume_attachment.attach_time,
        volume_attachment.instance_uuid,
        volume_attachment.mountpoint,
        volumes.consistencygroup_id,
        volumes.replication_status,
        volumes.replication_extended_status,
        volumes.replication_driver_data
    FROM volumes
    LEFT OUTER JOIN volume_attachment
        ON volumes.id=volume_attachment.volume_id;
-- Swap the rebuilt table into place and drop the now-folded table.
DROP TABLE volumes;
ALTER TABLE volumes_v39 RENAME TO volumes;
DROP TABLE volume_attachment;

View File

@ -19,10 +19,3 @@ def upgrade(migrate_engine):
services = Table('services', meta, autoload=True)
modified_at = Column('modified_at', DateTime(timezone=False))
services.create_column(modified_at)
def downgrade(migrate_engine):
    """Drop the modified_at column that the upgrade added to services."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    Table('services', metadata, autoload=True).drop_column('modified_at')

View File

@ -20,7 +20,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
    # Intentional no-op: the matching upgrade() above is also a pass, so
    # there is no schema change to revert.
    # NOTE(review): parameter is spelled 'migration_engine', unlike the
    # 'migrate_engine' convention elsewhere; harmless here since the
    # engine is passed positionally and never used.
    pass

View File

@ -20,7 +20,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
    # Intentional no-op: the matching upgrade() above is also a pass, so
    # there is no schema change to revert.
    # NOTE(review): parameter is spelled 'migration_engine', unlike the
    # 'migrate_engine' convention elsewhere; harmless here since the
    # engine is passed positionally and never used.
    pass

View File

@ -20,7 +20,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
    # Intentional no-op: the matching upgrade() above is also a pass, so
    # there is no schema change to revert.
    # NOTE(review): parameter is spelled 'migration_engine', unlike the
    # 'migrate_engine' convention elsewhere; harmless here since the
    # engine is passed positionally and never used.
    pass

View File

@ -20,7 +20,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
    # Intentional no-op: the matching upgrade() above is also a pass, so
    # there is no schema change to revert.
    # NOTE(review): parameter is spelled 'migration_engine', unlike the
    # 'migrate_engine' convention elsewhere; harmless here since the
    # engine is passed positionally and never used.
    pass

View File

@ -20,7 +20,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
    # Intentional no-op: the matching upgrade() above is also a pass, so
    # there is no schema change to revert.
    # NOTE(review): parameter is spelled 'migration_engine', unlike the
    # 'migrate_engine' convention elsewhere; harmless here since the
    # engine is passed positionally and never used.
    pass

View File

@ -42,14 +42,3 @@ def upgrade(migrate_engine):
'resource': 'per_volume_gigabytes',
'hard_limit': -1,
'deleted': False, })
def downgrade(migrate_engine):
    """Downgrade.

    Don't delete the 'default' entries at downgrade time.

    We don't know if the user had default entries when we started.
    If they did, we wouldn't want to remove them. So, the safest
    thing to do is just leave the 'default' entries at downgrade time.
    """
    pass

View File

@ -23,12 +23,3 @@ def upgrade(migrate_engine):
# Add a new column allocated to save allocated quota
allocated = Column('allocated', Integer, default=0)
quotas.create_column(allocated)
def downgrade(migrate_engine):
    """Drop the allocated column that the upgrade added to quotas."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    quota_table = Table('quotas', metadata, autoload=True)
    quota_table.drop_column('allocated')

View File

@ -29,15 +29,3 @@ def upgrade(migrate_engine):
backups.create_column(temp_snapshot_id)
backups.update().values(temp_snapshot_id=None).execute()
def downgrade(migrate_engine):
    """Drop the temp_volume_id and temp_snapshot_id backup columns."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    backups = Table('backups', metadata, autoload=True)
    # Drop in the same order the upgrade created the columns.
    for column_name in ('temp_volume_id', 'temp_snapshot_id'):
        backups.drop_column(backups.columns[column_name])

View File

@ -25,13 +25,3 @@ def upgrade(migrate_engine):
volumes.create_column(previous_status)
volumes.update().values(previous_status=None).execute()
def downgrade(migrate_engine):
    """Drop the previous_status column that the upgrade added to volumes."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    volumes = Table('volumes', metadata, autoload=True)
    volumes.drop_column(volumes.columns.previous_status)

View File

@ -24,14 +24,3 @@ def upgrade(migrate_engine):
consistencygroups.create_column(source_cgid)
consistencygroups.update().values(source_cgid=None).execute()
def downgrade(migrate_engine):
    """Drop the source_cgid column added to consistencygroups."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    cg_table = Table('consistencygroups', metadata, autoload=True)
    cg_table.drop_column(cg_table.columns.source_cgid)

View File

@ -23,13 +23,3 @@ def upgrade(migrate_engine):
provider_auth = Column('provider_auth', String(255))
snapshots.create_column(provider_auth)
snapshots.update().values(provider_auth=None).execute()
def downgrade(migrate_engine):
    """Drop the provider_auth column that the upgrade added to snapshots."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    snapshots = Table('snapshots', metadata, autoload=True)
    snapshots.drop_column(snapshots.columns.provider_auth)

View File

@ -34,18 +34,3 @@ def upgrade(migrate_engine):
services.update().values(rpc_available_version=None).execute()
services.update().values(object_current_version=None).execute()
services.update().values(object_available_version=None).execute()
def downgrade(migrate_engine):
    """Drop the RPC/object version columns added to services."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    services = Table('services', metadata, autoload=True)
    # Drop in the same order the upgrade created the columns.
    for column_name in ('rpc_current_version', 'rpc_available_version',
                        'object_current_version',
                        'object_available_version'):
        services.drop_column(services.columns[column_name])

View File

@ -31,14 +31,3 @@ def upgrade(migrate_engine):
if dep_bks_list:
backups.update().where(backups.columns.id == backup.id).values(
num_dependent_backups=len(dep_bks_list)).execute()
def downgrade(migrate_engine):
    """Drop the num_dependent_backups column from backups."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    backups = Table('backups', metadata, autoload=True)
    backups.drop_column(backups.columns.num_dependent_backups)

View File

@ -35,12 +35,3 @@ def upgrade(migrate_engine):
)
image_volume_cache.create()
def downgrade(migrate_engine):
    """Drop the image_volume_cache_entries table created by the upgrade."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    Table('image_volume_cache_entries', metadata, autoload=True).drop()

View File

@ -17,7 +17,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
    # Intentional no-op: the matching upgrade() above is also a pass, so
    # there is no schema change to revert.
    # NOTE(review): parameter is spelled 'migration_engine', unlike the
    # 'migrate_engine' convention elsewhere; harmless here since the
    # engine is passed positionally and never used.
    pass

View File

@ -17,7 +17,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
    # Intentional no-op: the matching upgrade() above is also a pass, so
    # there is no schema change to revert.
    # NOTE(review): parameter is spelled 'migration_engine', unlike the
    # 'migrate_engine' convention elsewhere; harmless here since the
    # engine is passed positionally and never used.
    pass

View File

@ -17,7 +17,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
    # Intentional no-op: the matching upgrade() above is also a pass, so
    # there is no schema change to revert.
    # NOTE(review): parameter is spelled 'migration_engine', unlike the
    # 'migrate_engine' convention elsewhere; harmless here since the
    # engine is passed positionally and never used.
    pass

View File

@ -17,7 +17,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
    # Intentional no-op: the matching upgrade() above is also a pass, so
    # there is no schema change to revert.
    # NOTE(review): parameter is spelled 'migration_engine', unlike the
    # 'migrate_engine' convention elsewhere; harmless here since the
    # engine is passed positionally and never used.
    pass

View File

@ -17,7 +17,3 @@
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
    # Intentional no-op: the matching upgrade() above is also a pass, so
    # there is no schema change to revert.
    # NOTE(review): parameter is spelled 'migration_engine', unlike the
    # 'migrate_engine' convention elsewhere; harmless here since the
    # engine is passed positionally and never used.
    pass

View File

@ -35,6 +35,7 @@ from cinder.cmd import scheduler as cinder_scheduler
from cinder.cmd import volume as cinder_volume
from cinder.cmd import volume_usage_audit
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_volume
from cinder import version
@ -384,6 +385,13 @@ class TestCinderManageCmd(test.TestCase):
db_cmds.version()
self.assertEqual(1, db_version.call_count)
@mock.patch('oslo_db.sqlalchemy.migration.db_version')
def test_db_commands_downgrade_fails(self, db_version):
    """Syncing below the current schema version must raise InvalidInput."""
    # Current DB version is mocked as 2; requesting version 1 is a
    # downgrade, which db_sync now rejects.
    db_version.return_value = 2
    db_cmds = cinder_manage.DbCommands()
    # Swallow the command's console output during the check.
    with mock.patch('sys.stdout', new=six.StringIO()):
        self.assertRaises(exception.InvalidInput, db_cmds.sync, 1)
@mock.patch('cinder.version.version_string')
def test_versions_commands_list(self, version_string):
version_cmds = cinder_manage.VersionCommands()

View File

@ -159,21 +159,12 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(snapshots.c.provider_location.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_006(self, engine):
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertNotIn('provider_location', snapshots.c)
def _check_007(self, engine, data):
snapshots = db_utils.get_table(engine, 'snapshots')
fkey, = snapshots.c.volume_id.foreign_keys
self.assertIsNotNone(fkey)
def _post_downgrade_007(self, engine):
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertEqual(0, len(snapshots.c.volume_id.foreign_keys))
def _pre_upgrade_008(self, engine):
self.assertFalse(engine.dialect.has_table(engine.connect(),
"backups"))
@ -249,10 +240,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(snapshot_metadata.c.value.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_008(self, engine):
self.assertFalse(engine.dialect.has_table(engine.connect(),
"snapshot_metadata"))
def _check_010(self, engine, data):
"""Test adding transfers table works correctly."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
@ -280,10 +267,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(transfers.c.expires_at.type,
self.TIME_TYPE)
def _post_downgrade_010(self, engine):
self.assertFalse(engine.dialect.has_table(engine.connect(),
"transfers"))
def _check_011(self, engine, data):
"""Test adding transfers table works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
@ -291,49 +274,29 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(volumes.c.bootable.type,
self.BOOL_TYPE)
def _post_downgrade_011(self, engine):
volumes = db_utils.get_table(engine, 'volumes')
self.assertNotIn('bootable', volumes.c)
def _check_012(self, engine, data):
"""Test that adding attached_host column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.attached_host.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_012(self, engine):
volumes = db_utils.get_table(engine, 'volumes')
self.assertNotIn('attached_host', volumes.c)
def _check_013(self, engine, data):
"""Test that adding provider_geometry column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.provider_geometry.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_013(self, engine):
volumes = db_utils.get_table(engine, 'volumes')
self.assertNotIn('provider_geometry', volumes.c)
def _check_014(self, engine, data):
"""Test that adding _name_id column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c._name_id.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_014(self, engine):
volumes = db_utils.get_table(engine, 'volumes')
self.assertNotIn('_name_id', volumes.c)
def _check_015(self, engine, data):
"""Test removing migrations table works correctly."""
self.assertFalse(engine.dialect.has_table(engine.connect(),
"migrations"))
def _post_downgrade_015(self, engine):
self.assertTrue(engine.dialect.has_table(engine.connect(),
"migrations"))
def _check_016(self, engine, data):
"""Test that dropping xen storage manager tables works correctly."""
self.assertFalse(engine.dialect.has_table(engine.connect(),
@ -343,14 +306,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertFalse(engine.dialect.has_table(engine.connect(),
'sm_volume'))
def _post_downgrade_016(self, engine):
self.assertTrue(engine.dialect.has_table(engine.connect(),
'sm_flavors'))
self.assertTrue(engine.dialect.has_table(engine.connect(),
'sm_backend_config'))
self.assertTrue(engine.dialect.has_table(engine.connect(),
'sm_volume'))
def _check_017(self, engine, data):
"""Test that added encryption information works correctly."""
# encryption key UUID
@ -378,16 +333,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(encryption.c.provider.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_017(self, engine):
volumes = db_utils.get_table(engine, 'volumes')
self.assertNotIn('encryption_key_id', volumes.c)
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertNotIn('encryption_key_id', snapshots.c)
self.assertFalse(engine.dialect.has_table(engine.connect(),
'encryption'))
def _check_018(self, engine, data):
"""Test that added qos_specs table works correctly."""
self.assertTrue(engine.dialect.has_table(
@ -410,20 +355,12 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(qos_specs.c.value.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_018(self, engine):
self.assertFalse(engine.dialect.has_table(
engine.connect(), "quality_of_service_specs"))
def _check_019(self, engine, data):
"""Test that adding migration_status column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.migration_status.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_019(self, engine):
volumes = db_utils.get_table(engine, 'volumes')
self.assertNotIn('migration_status', volumes.c)
def _check_020(self, engine, data):
"""Test adding volume_admin_metadata table works correctly."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
@ -448,10 +385,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(volume_admin_metadata.c.value.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_020(self, engine):
self.assertFalse(engine.dialect.has_table(engine.connect(),
"volume_admin_metadata"))
def _verify_quota_defaults(self, engine):
quota_class_metadata = db_utils.get_table(engine, 'quota_classes')
@ -465,20 +398,12 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
"""Test adding default data for quota classes works correctly."""
self._verify_quota_defaults(engine)
def _post_downgrade_021(self, engine):
# Defaults should not be deleted during downgrade
self._verify_quota_defaults(engine)
def _check_022(self, engine, data):
"""Test that adding disabled_reason column works correctly."""
services = db_utils.get_table(engine, 'services')
self.assertIsInstance(services.c.disabled_reason.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_022(self, engine):
services = db_utils.get_table(engine, 'services')
self.assertNotIn('disabled_reason', services.c)
def _check_023(self, engine, data):
"""Test that adding reservations index works correctly."""
reservations = db_utils.get_table(engine, 'reservations')
@ -491,11 +416,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertEqual(sorted(['deleted', 'expire']),
sorted(index_columns))
def _post_downgrade_023(self, engine):
reservations = db_utils.get_table(engine, 'reservations')
index_names = [idx.name for idx in reservations.indexes]
self.assertNotIn('reservations_deleted_expire_idx', index_names)
def _check_024(self, engine, data):
"""Test adding replication columns to volume table."""
volumes = db_utils.get_table(engine, 'volumes')
@ -506,12 +426,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(volumes.c.replication_driver_data.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_024(self, engine):
volumes = db_utils.get_table(engine, 'volumes')
self.assertNotIn('replication_status', volumes.c)
self.assertNotIn('replication_extended_status', volumes.c)
self.assertNotIn('replication_driver_data', volumes.c)
def _check_025(self, engine, data):
"""Test adding table and columns for consistencygroups."""
# Test consistencygroup_id is in Table volumes
@ -600,31 +514,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
# 2 foreign keys in Table snapshots
self.assertEqual(2, len(snapshots.foreign_keys))
def _post_downgrade_025(self, engine):
metadata = sqlalchemy.MetaData()
# Test consistencygroup_id is not in Table volumes
volumes = self.get_table_ref(engine, 'volumes', metadata)
self.assertNotIn('consistencygroup_id', volumes.c)
# Test cgsnapshot_id is not in Table snapshots
snapshots = self.get_table_ref(engine, 'snapshots', metadata)
self.assertNotIn('cgsnapshot_id', snapshots.c)
# Verify foreign keys are removed
self.assertEqual(0, len(volumes.foreign_keys))
self.assertEqual(1, len(snapshots.foreign_keys))
# volume_id foreign key is still in Table snapshots
fkey, = snapshots.c.volume_id.foreign_keys
self.assertEqual(volumes.c.id, fkey.column)
# Test Table cgsnapshots doesn't exist any more
self.assertFalse(engine.dialect.has_table(engine.connect(),
"cgsnapshots"))
# Test Table consistencygroups doesn't exist any more
self.assertFalse(engine.dialect.has_table(engine.connect(),
"consistencygroups"))
def _pre_upgrade_026(self, engine):
"""Test adding default data for consistencygroups quota class."""
quota_class_metadata = db_utils.get_table(engine, 'quota_classes')
@ -643,15 +532,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertEqual(4, num_defaults)
def _post_downgrade_026(self, engine):
# Defaults should not be deleted during downgrade
quota_class_metadata = db_utils.get_table(engine, 'quota_classes')
num_defaults = quota_class_metadata.count().\
where(quota_class_metadata.c.class_name == 'default').\
execute().scalar()
self.assertEqual(4, num_defaults)
def _check_032(self, engine, data):
"""Test adding volume_type_projects table works correctly."""
volume_type_projects = db_utils.get_table(engine,
@ -675,62 +555,33 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(volume_types.c.is_public.type,
self.BOOL_TYPE)
def _post_downgrade_032(self, engine):
self.assertFalse(engine.dialect.has_table(engine.connect(),
"volume_type_projects"))
volume_types = db_utils.get_table(engine, 'volume_types')
self.assertNotIn('is_public', volume_types.c)
def _check_033(self, engine, data):
"""Test adding encryption_id column to encryption table."""
encryptions = db_utils.get_table(engine, 'encryption')
self.assertIsInstance(encryptions.c.encryption_id.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_033(self, engine):
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
encryptions = db_utils.get_table(engine, 'encryption')
self.assertNotIn('encryption_id', encryptions.c)
def _check_034(self, engine, data):
"""Test adding description columns to volume_types table."""
volume_types = db_utils.get_table(engine, 'volume_types')
self.assertIsInstance(volume_types.c.description.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_034(self, engine):
volume_types = db_utils.get_table(engine, 'volume_types')
self.assertNotIn('description', volume_types.c)
def _check_035(self, engine, data):
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.provider_id.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_035(self, engine):
volumes = db_utils.get_table(engine, 'volumes')
self.assertNotIn('provider_id', volumes.c)
def _check_036(self, engine, data):
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.provider_id.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_036(self, engine):
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertNotIn('provider_id', snapshots.c)
def _check_037(self, engine, data):
consistencygroups = db_utils.get_table(engine, 'consistencygroups')
self.assertIsInstance(consistencygroups.c.cgsnapshot_id.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_037(self, engine):
consistencygroups = db_utils.get_table(engine, 'consistencygroups')
self.assertNotIn('cgsnapshot_id', consistencygroups.c)
def _check_038(self, engine, data):
"""Test adding and removing driver_initiator_data table."""
@ -758,20 +609,11 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(private_data.c.value.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_038(self, engine):
has_table = engine.dialect.has_table(engine.connect(),
"driver_initiator_data")
self.assertFalse(has_table)
def _check_039(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.parent_id.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_039(self, engine):
backups = db_utils.get_table(engine, 'backups')
self.assertNotIn('parent_id', backups.c)
def _check_40(self, engine, data):
volumes = db_utils.get_table(engine, 'volumes')
self.assertNotIn('instance_uuid', volumes.c)
@ -793,39 +635,17 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(attachments.c.attach_status.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_040(self, engine):
self.assertFalse(engine.dialect.has_table(engine.connect(),
"volume_attachment"))
volumes = db_utils.get_table(engine, 'volumes')
self.assertNotIn('multiattach', volumes.c)
self.assertIsInstance(volumes.c.instance_uuid.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(volumes.c.attached_host.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(volumes.c.attach_time.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(volumes.c.mountpoint.type,
sqlalchemy.types.VARCHAR)
def _check_041(self, engine, data):
"""Test that adding modified_at column works correctly."""
services = db_utils.get_table(engine, 'services')
self.assertIsInstance(services.c.modified_at.type,
self.TIME_TYPE)
def _post_downgrade_041(self, engine):
services = db_utils.get_table(engine, 'services')
self.assertNotIn('modified_at', services.c)
def _check_048(self, engine, data):
quotas = db_utils.get_table(engine, 'quotas')
self.assertIsInstance(quotas.c.allocated.type,
sqlalchemy.types.INTEGER)
def _post_downgrade_048(self, engine):
quotas = db_utils.get_table(engine, 'quotas')
self.assertNotIn('allocated', quotas.c)
def _check_049(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.temp_volume_id.type,
@ -833,38 +653,21 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(backups.c.temp_snapshot_id.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_049(self, engine):
backups = db_utils.get_table(engine, 'backups')
self.assertNotIn('temp_volume_id', backups.c)
self.assertNotIn('temp_snapshot_id', backups.c)
def _check_050(self, engine, data):
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.previous_status.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_050(self, engine):
volumes = db_utils.get_table(engine, 'volumes')
self.assertNotIn('previous_status', volumes.c)
def _check_051(self, engine, data):
consistencygroups = db_utils.get_table(engine, 'consistencygroups')
self.assertIsInstance(consistencygroups.c.source_cgid.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_051(self, engine):
consistencygroups = db_utils.get_table(engine, 'consistencygroups')
self.assertNotIn('source_cgid', consistencygroups.c)
def _check_052(self, engine, data):
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.provider_auth.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_052(self, engine):
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertNotIn('provider_auth', snapshots.c)
def _check_053(self, engine, data):
services = db_utils.get_table(engine, 'services')
self.assertIsInstance(services.c.rpc_current_version.type,
@ -876,22 +679,11 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(services.c.object_available_version.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_053(self, engine):
services = db_utils.get_table(engine, 'services')
self.assertNotIn('rpc_current_version', services.c)
self.assertNotIn('rpc_available_version', services.c)
self.assertNotIn('object_current_version', services.c)
self.assertNotIn('object_available_version', services.c)
def _check_054(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.num_dependent_backups.type,
sqlalchemy.types.INTEGER)
def _post_downgrade_054(self, engine):
backups = db_utils.get_table(engine, 'backups')
self.assertNotIn('num_dependent_backups', backups.c)
def _check_055(self, engine, data):
"""Test adding image_volume_cache_entries table."""
has_table = engine.dialect.has_table(engine.connect(),
@ -918,14 +710,8 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(private_data.c.last_used.type,
self.TIME_TYPE)
def _post_downgrade_055(self, engine):
"""Test removing image_volume_cache_entries table."""
has_table = engine.dialect.has_table(engine.connect(),
"image_volume_cache_entries")
self.assertFalse(has_table)
def test_walk_versions(self):
self.walk_versions(True, False)
self.walk_versions(False, False)
class TestSqliteMigrations(test_base.DbTestCase,

View File

@ -38,10 +38,10 @@ class ExceptionTestCase(test.TestCase):
class ProjectTestCase(test.TestCase):
def test_all_migrations_have_downgrade(self):
topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')
topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
py_glob = os.path.join(topdir, "cinder", "db", "sqlalchemy",
"migrate_repo", "versions", "*.py")
missing_downgrade = []
downgrades = []
for path in glob.iglob(py_glob):
has_upgrade = False
has_downgrade = False
@ -52,10 +52,11 @@ class ProjectTestCase(test.TestCase):
if 'def downgrade(' in line:
has_downgrade = True
if has_upgrade and not has_downgrade:
if has_upgrade and has_downgrade:
fname = os.path.basename(path)
missing_downgrade.append(fname)
downgrades.append(fname)
helpful_msg = (_("The following migrations are missing a downgrade:"
"\n\t%s") % '\n\t'.join(sorted(missing_downgrade)))
self.assertFalse(missing_downgrade, msg=helpful_msg)
helpful_msg = (_("The following migrations have a downgrade, "
"which are not allowed: "
"\n\t%s") % '\n\t'.join(sorted(downgrades)))
self.assertFalse(downgrades, msg=helpful_msg)