From e9dccb93be059176fec19b9a021672cbcc2f8414 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 17 Feb 2023 12:34:56 +0000 Subject: [PATCH] db: Remove legacy migrations sqlalchemy-migrate does not (and will not) support sqlalchemy 2.0. We need to drop these migrations to ensure we can upgrade our sqlalchemy version. Change-Id: I39448af0eb8f4c557d591057760c27b1d40d3593 Signed-off-by: Stephen Finucane --- cinder/db/legacy_migrations/README | 7 - cinder/db/legacy_migrations/__init__.py | 0 cinder/db/legacy_migrations/manage.py | 24 - cinder/db/legacy_migrations/migrate.cfg | 20 - .../versions/135_cinder_init.py | 1035 ----------------- .../136_make_vol_type_col_non_nullable.py | 52 - .../versions/137_placeholder.py | 22 - .../versions/138_placeholder.py | 22 - .../versions/139_placeholder.py | 22 - .../140_create_project_default_volume_type.py | 50 - .../141_add_quota_usage_unique_constraint.py | 38 - .../versions/142_placeholder.py | 22 - .../versions/143_placeholder.py | 22 - .../versions/144_placeholder.py | 22 - .../versions/145_add_use_quota_fields.py | 34 - .../db/legacy_migrations/versions/__init__.py | 0 cinder/db/migration.py | 73 +- cinder/tests/unit/db/test_migration.py | 139 +-- cinder/tests/unit/db/test_migrations.py | 256 +--- doc/source/conf.py | 2 - .../contributor/database-migrations.rst | 14 +- ...e-sqlalchemy-migrate-c62b541fd5f4ab10.yaml | 5 + requirements.txt | 1 - 23 files changed, 22 insertions(+), 1860 deletions(-) delete mode 100644 cinder/db/legacy_migrations/README delete mode 100644 cinder/db/legacy_migrations/__init__.py delete mode 100644 cinder/db/legacy_migrations/manage.py delete mode 100644 cinder/db/legacy_migrations/migrate.cfg delete mode 100644 cinder/db/legacy_migrations/versions/135_cinder_init.py delete mode 100644 cinder/db/legacy_migrations/versions/136_make_vol_type_col_non_nullable.py delete mode 100644 cinder/db/legacy_migrations/versions/137_placeholder.py delete mode 100644 
cinder/db/legacy_migrations/versions/138_placeholder.py delete mode 100644 cinder/db/legacy_migrations/versions/139_placeholder.py delete mode 100644 cinder/db/legacy_migrations/versions/140_create_project_default_volume_type.py delete mode 100644 cinder/db/legacy_migrations/versions/141_add_quota_usage_unique_constraint.py delete mode 100644 cinder/db/legacy_migrations/versions/142_placeholder.py delete mode 100644 cinder/db/legacy_migrations/versions/143_placeholder.py delete mode 100644 cinder/db/legacy_migrations/versions/144_placeholder.py delete mode 100644 cinder/db/legacy_migrations/versions/145_add_use_quota_fields.py delete mode 100644 cinder/db/legacy_migrations/versions/__init__.py create mode 100644 releasenotes/notes/remove-sqlalchemy-migrate-c62b541fd5f4ab10.yaml diff --git a/cinder/db/legacy_migrations/README b/cinder/db/legacy_migrations/README deleted file mode 100644 index 2f81df17aea..00000000000 --- a/cinder/db/legacy_migrations/README +++ /dev/null @@ -1,7 +0,0 @@ -This is a database migration repository. - -More information at: - https://github.com/openstack/sqlalchemy-migrate - -Original project is no longer maintained at: - http://code.google.com/p/sqlalchemy-migrate/ diff --git a/cinder/db/legacy_migrations/__init__.py b/cinder/db/legacy_migrations/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/cinder/db/legacy_migrations/manage.py b/cinder/db/legacy_migrations/manage.py deleted file mode 100644 index 9d9baedaa25..00000000000 --- a/cinder/db/legacy_migrations/manage.py +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from migrate.versioning.shell import main - -REPOSITORY = os.path.abspath(os.path.dirname(__file__)) - - -if __name__ == '__main__': - main(debug='False', repository=REPOSITORY) diff --git a/cinder/db/legacy_migrations/migrate.cfg b/cinder/db/legacy_migrations/migrate.cfg deleted file mode 100644 index 10c685c0e50..00000000000 --- a/cinder/db/legacy_migrations/migrate.cfg +++ /dev/null @@ -1,20 +0,0 @@ -[db_settings] -# Used to identify which repository this database is versioned under. -# You can use the name of your project. -repository_id=cinder - -# The name of the database table used to track the schema version. -# This name shouldn't already be used by your project. -# If this is changed once a database is under version control, you'll need to -# change the table name in each database too. -version_table=migrate_version - -# When committing a change script, Migrate will attempt to generate the -# sql for all supported databases; normally, if one of them fails - probably -# because you don't have that database installed - it is ignored and the -# commit continues, perhaps ending successfully. -# Databases in this list MUST compile successfully during a commit, or the -# entire commit will fail. List the databases your application will actually -# be using to ensure your updates to that database work properly. 
-# This must be a list; example: ['postgres','sqlite'] -required_dbs=[] diff --git a/cinder/db/legacy_migrations/versions/135_cinder_init.py b/cinder/db/legacy_migrations/versions/135_cinder_init.py deleted file mode 100644 index 4cb1451a09d..00000000000 --- a/cinder/db/legacy_migrations/versions/135_cinder_init.py +++ /dev/null @@ -1,1035 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import uuid - -from oslo_config import cfg -from oslo_utils import timeutils -import sqlalchemy as sa -from sqlalchemy.dialects import mysql -from sqlalchemy.sql import expression - -from cinder.volume import group_types as volume_group_types -from cinder.volume import volume_types - -# Get default values via config. The defaults will either -# come from the default values set in the quota option -# configuration or via cinder.conf if the user has configured -# default values for quotas there. 
-CONF = cfg.CONF -CONF.import_opt('quota_volumes', 'cinder.quota') -CONF.import_opt('quota_snapshots', 'cinder.quota') -CONF.import_opt('quota_gigabytes', 'cinder.quota') -CONF.import_opt('quota_consistencygroups', 'cinder.quota') - -CLASS_NAME = 'default' -CREATED_AT = datetime.datetime.now() # noqa - - -def define_tables(meta): - services = sa.Table( - 'services', meta, - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('deleted_at', sa.DateTime), - sa.Column('deleted', sa.Boolean), - sa.Column('id', sa.Integer, primary_key=True, nullable=False), - sa.Column('host', sa.String(255)), - sa.Column('binary', sa.String(255)), - sa.Column('topic', sa.String(255)), - sa.Column('report_count', sa.Integer, nullable=False), - sa.Column('disabled', sa.Boolean), - sa.Column('availability_zone', sa.String(255)), - sa.Column('disabled_reason', sa.String(255)), - sa.Column('modified_at', sa.DateTime(timezone=False)), - sa.Column('rpc_current_version', sa.String(36)), - sa.Column('object_current_version', sa.String(36)), - sa.Column('replication_status', sa.String(36), default='not-capable'), - sa.Column('frozen', sa.Boolean, default=False), - sa.Column('active_backend_id', sa.String(255)), - sa.Column('cluster_name', sa.String(255), nullable=True), - sa.Column('uuid', sa.String(36), nullable=True), - sa.Index('services_uuid_idx', 'uuid', unique=True), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - consistencygroups = sa.Table( - 'consistencygroups', meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('user_id', sa.String(255)), - sa.Column('project_id', sa.String(255)), - sa.Column('host', sa.String(255)), - sa.Column('availability_zone', sa.String(255)), - 
sa.Column('name', sa.String(255)), - sa.Column('description', sa.String(255)), - sa.Column('volume_type_id', sa.String(255)), - sa.Column('status', sa.String(255)), - sa.Column('cgsnapshot_id', sa.String(36)), - sa.Column('source_cgid', sa.String(36)), - sa.Column('cluster_name', sa.String(255), nullable=True), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - cgsnapshots = sa.Table( - 'cgsnapshots', meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column( - 'consistencygroup_id', - sa.String(36), - sa.ForeignKey('consistencygroups.id'), - nullable=False, - index=True), - sa.Column('user_id', sa.String(255)), - sa.Column('project_id', sa.String(255)), - sa.Column('name', sa.String(255)), - sa.Column('description', sa.String(255)), - sa.Column('status', sa.String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - groups = sa.Table( - 'groups', meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean), - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('user_id', sa.String(length=255)), - sa.Column('project_id', sa.String(length=255)), - sa.Column('cluster_name', sa.String(255)), - sa.Column('host', sa.String(length=255)), - sa.Column('availability_zone', sa.String(length=255)), - sa.Column('name', sa.String(length=255)), - sa.Column('description', sa.String(length=255)), - sa.Column('group_type_id', sa.String(length=36)), - sa.Column('status', sa.String(length=255)), - sa.Column('group_snapshot_id', sa.String(36)), - sa.Column('source_group_id', sa.String(36)), - sa.Column('replication_status', 
sa.String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - group_snapshots = sa.Table( - 'group_snapshots', meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), - sa.Column('id', sa.String(36), primary_key=True), - sa.Column( - 'group_id', - sa.String(36), - sa.ForeignKey('groups.id'), - nullable=False, - index=True), - sa.Column('user_id', sa.String(length=255)), - sa.Column('project_id', sa.String(length=255)), - sa.Column('name', sa.String(length=255)), - sa.Column('description', sa.String(length=255)), - sa.Column('status', sa.String(length=255)), - sa.Column('group_type_id', sa.String(length=36)), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - volumes = sa.Table( - 'volumes', meta, - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('deleted_at', sa.DateTime), - sa.Column('deleted', sa.Boolean), - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('ec2_id', sa.String(255)), - sa.Column('user_id', sa.String(255)), - sa.Column('project_id', sa.String(255)), - sa.Column('host', sa.String(255)), - sa.Column('size', sa.Integer), - sa.Column('availability_zone', sa.String(255)), - sa.Column('status', sa.String(255)), - sa.Column('attach_status', sa.String(255)), - sa.Column('scheduled_at', sa.DateTime), - sa.Column('launched_at', sa.DateTime), - sa.Column('terminated_at', sa.DateTime), - sa.Column('display_name', sa.String(255)), - sa.Column('display_description', sa.String(255)), - sa.Column('provider_location', sa.String(256)), - sa.Column('provider_auth', sa.String(256)), - sa.Column('snapshot_id', sa.String(36)), - sa.Column('volume_type_id', sa.String(36)), - sa.Column('source_volid', sa.String(36)), - sa.Column('bootable', sa.Boolean), - sa.Column('provider_geometry', 
sa.String(255)), - sa.Column('_name_id', sa.String(36)), - sa.Column('encryption_key_id', sa.String(36)), - sa.Column('migration_status', sa.String(255)), - sa.Column('replication_status', sa.String(255)), - sa.Column('replication_extended_status', sa.String(255)), - sa.Column('replication_driver_data', sa.String(255)), - sa.Column( - 'consistencygroup_id', - sa.String(36), - sa.ForeignKey('consistencygroups.id'), - index=True), - sa.Column('provider_id', sa.String(255)), - sa.Column('multiattach', sa.Boolean), - sa.Column('previous_status', sa.String(255)), - sa.Column('cluster_name', sa.String(255), nullable=True), - sa.Column( - 'group_id', - sa.String(36), - sa.ForeignKey('groups.id'), - index=True), - sa.Column( - 'service_uuid', - sa.String(36), - sa.ForeignKey('services.uuid'), - nullable=True), - sa.Column('shared_targets', sa.Boolean, default=True), - sa.Index('volumes_service_uuid_idx', 'service_uuid', 'deleted'), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - volume_attachment = sa.Table( - 'volume_attachment', meta, - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('deleted_at', sa.DateTime), - sa.Column('deleted', sa.Boolean), - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column( - 'volume_id', - sa.String(36), - sa.ForeignKey('volumes.id'), - nullable=False, - index=True), - sa.Column('attached_host', sa.String(255)), - sa.Column('instance_uuid', sa.String(36)), - sa.Column('mountpoint', sa.String(255)), - sa.Column('attach_time', sa.DateTime), - sa.Column('detach_time', sa.DateTime), - sa.Column('attach_mode', sa.String(36)), - sa.Column('attach_status', sa.String(255)), - sa.Column('connection_info', sa.Text), - sa.Column('connector', sa.Text), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - attachment_specs = sa.Table( - 'attachment_specs', meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - 
sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean(), default=False), - sa.Column('id', sa.Integer, primary_key=True, nullable=False), - sa.Column( - 'attachment_id', - sa.String(36), - sa.ForeignKey('volume_attachment.id'), - nullable=False, - index=True), - sa.Column('key', sa.String(255)), - sa.Column('value', sa.String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - snapshots = sa.Table( - 'snapshots', meta, - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('deleted_at', sa.DateTime), - sa.Column('deleted', sa.Boolean), - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column( - 'volume_id', - sa.String(36), - sa.ForeignKey('volumes.id', name='snapshots_volume_id_fkey'), - nullable=False, - index=True), - sa.Column('user_id', sa.String(255)), - sa.Column('project_id', sa.String(255)), - sa.Column('status', sa.String(255)), - sa.Column('progress', sa.String(255)), - sa.Column('volume_size', sa.Integer), - sa.Column('scheduled_at', sa.DateTime), - sa.Column('display_name', sa.String(255)), - sa.Column('display_description', sa.String(255)), - sa.Column('provider_location', sa.String(255)), - sa.Column('encryption_key_id', sa.String(36)), - sa.Column('volume_type_id', sa.String(36)), - sa.Column( - 'cgsnapshot_id', - sa.String(36), - sa.ForeignKey('cgsnapshots.id'), - index=True), - sa.Column('provider_id', sa.String(255)), - sa.Column('provider_auth', sa.String(255)), - sa.Column( - 'group_snapshot_id', - sa.String(36), - sa.ForeignKey('group_snapshots.id'), - index=True), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - snapshot_metadata = sa.Table( - 'snapshot_metadata', meta, - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('deleted_at', sa.DateTime), - sa.Column('deleted', sa.Boolean), - sa.Column('id', sa.Integer, primary_key=True, nullable=False), - sa.Column( - 'snapshot_id', - sa.String(36), 
- sa.ForeignKey('snapshots.id'), - nullable=False, - index=True), - sa.Column('key', sa.String(255)), - sa.Column('value', sa.String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - quality_of_service_specs = sa.Table( - 'quality_of_service_specs', meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column( - 'specs_id', - sa.String(36), - sa.ForeignKey('quality_of_service_specs.id'), - index=True), - sa.Column('key', sa.String(255)), - sa.Column('value', sa.String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - volume_types = sa.Table( - 'volume_types', meta, - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('deleted_at', sa.DateTime), - sa.Column('deleted', sa.Boolean), - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('name', sa.String(255)), - sa.Column( - 'qos_specs_id', - sa.String(36), - sa.ForeignKey('quality_of_service_specs.id'), - index=True), - sa.Column('is_public', sa.Boolean), - sa.Column('description', sa.String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - volume_type_projects = sa.Table( - 'volume_type_projects', meta, - sa.Column('id', sa.Integer, primary_key=True, nullable=False), - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('deleted_at', sa.DateTime), - sa.Column( - 'volume_type_id', - sa.String(36), - sa.ForeignKey('volume_types.id')), - sa.Column('project_id', sa.String(255)), - sa.Column('deleted', sa.Integer), - sa.UniqueConstraint('volume_type_id', 'project_id', 'deleted'), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - volume_metadata = sa.Table( - 'volume_metadata', meta, - sa.Column('created_at', 
sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('deleted_at', sa.DateTime), - sa.Column('deleted', sa.Boolean), - sa.Column('id', sa.Integer, primary_key=True, nullable=False), - sa.Column( - 'volume_id', - sa.String(36), - sa.ForeignKey('volumes.id'), - nullable=False, - index=True), - sa.Column('key', sa.String(255)), - sa.Column('value', sa.String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - volume_type_extra_specs = sa.Table( - 'volume_type_extra_specs', meta, - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('deleted_at', sa.DateTime), - sa.Column('deleted', sa.Boolean), - sa.Column('id', sa.Integer, primary_key=True, nullable=False), - sa.Column( - 'volume_type_id', - sa.String(36), - sa.ForeignKey( - 'volume_types.id', - name='volume_type_extra_specs_ibfk_1'), - nullable=False, - index=True), - sa.Column('key', sa.String(255)), - sa.Column('value', sa.String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - quotas = sa.Table( - 'quotas', meta, - sa.Column('id', sa.Integer, primary_key=True, nullable=False), - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('deleted_at', sa.DateTime), - sa.Column('deleted', sa.Boolean), - sa.Column('project_id', sa.String(255)), - sa.Column('resource', sa.String(255), nullable=False), - sa.Column('hard_limit', sa.Integer), - sa.Column('allocated', sa.Integer, default=0), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - quota_classes = sa.Table( - 'quota_classes', meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), - sa.Column('id', sa.Integer(), primary_key=True), - sa.Column('class_name', sa.String(255), index=True), - sa.Column('resource', sa.String(255)), - sa.Column('hard_limit', sa.Integer(), 
nullable=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - quota_usages = sa.Table( - 'quota_usages', meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), - sa.Column('id', sa.Integer(), primary_key=True), - sa.Column('project_id', sa.String(255), index=True), - sa.Column('resource', sa.String(255)), - sa.Column('in_use', sa.Integer(), nullable=False), - sa.Column('reserved', sa.Integer(), nullable=False), - sa.Column('until_refresh', sa.Integer(), nullable=True), - sa.Index('quota_usage_project_resource_idx', 'project_id', 'resource'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - reservations = sa.Table( - 'reservations', meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), - sa.Column('id', sa.Integer(), primary_key=True), - sa.Column('uuid', sa.String(36), nullable=False), - sa.Column( - 'usage_id', - sa.Integer(), - sa.ForeignKey('quota_usages.id'), - nullable=True, - index=True), - sa.Column('project_id', sa.String(255), index=True), - sa.Column('resource', sa.String(255)), - sa.Column('delta', sa.Integer(), nullable=False), - sa.Column('expire', sa.DateTime(timezone=False)), - sa.Column( - 'allocated_id', - sa.Integer, - sa.ForeignKey('quotas.id'), - nullable=True, - index=True), - sa.Index('reservations_deleted_expire_idx', 'deleted', 'expire'), - sa.Index('reservations_deleted_uuid_idx', 'deleted', 'uuid'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - volume_glance_metadata = sa.Table( - 'volume_glance_metadata', - meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - 
sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), - sa.Column('id', sa.Integer(), primary_key=True, nullable=False), - sa.Column( - 'volume_id', - sa.String(36), - sa.ForeignKey('volumes.id'), - index=True), - sa.Column( - 'snapshot_id', - sa.String(36), - sa.ForeignKey('snapshots.id'), - index=True), - sa.Column('key', sa.String(255)), - sa.Column('value', sa.Text), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - backups = sa.Table( - 'backups', meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('volume_id', sa.String(36), nullable=False), - sa.Column('user_id', sa.String(255)), - sa.Column('project_id', sa.String(255)), - sa.Column('host', sa.String(255)), - sa.Column('availability_zone', sa.String(255)), - sa.Column('display_name', sa.String(255)), - sa.Column('display_description', sa.String(255)), - sa.Column('container', sa.String(255)), - sa.Column('status', sa.String(255)), - sa.Column('fail_reason', sa.String(255)), - sa.Column('service_metadata', sa.String(255)), - sa.Column('service', sa.String(255)), - sa.Column('size', sa.Integer()), - sa.Column('object_count', sa.Integer()), - sa.Column('parent_id', sa.String(36)), - sa.Column('temp_volume_id', sa.String(36)), - sa.Column('temp_snapshot_id', sa.String(36)), - sa.Column('num_dependent_backups', sa.Integer, default=0), - sa.Column('snapshot_id', sa.String(36)), - sa.Column('data_timestamp', sa.DateTime), - sa.Column('restore_volume_id', sa.String(36)), - sa.Column('encryption_key_id', sa.String(36)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - backup_metadata = sa.Table( - 'backup_metadata', meta, - sa.Column('created_at', 
sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean(), default=False), - sa.Column('id', sa.Integer, primary_key=True, nullable=False), - sa.Column( - 'backup_id', - sa.String(36), - sa.ForeignKey('backups.id'), - nullable=False, - index=True), - sa.Column('key', sa.String(255)), - sa.Column('value', sa.String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - transfers = sa.Table( - 'transfers', meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean), - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column( - 'volume_id', - sa.String(36), - sa.ForeignKey('volumes.id'), - nullable=False, - index=True), - sa.Column('display_name', sa.String(255)), - sa.Column('salt', sa.String(255)), - sa.Column('crypt_hash', sa.String(255)), - sa.Column('expires_at', sa.DateTime(timezone=False)), - sa.Column('no_snapshots', sa.Boolean, default=False), - sa.Column('source_project_id', sa.String(255), nullable=True), - sa.Column('destination_project_id', sa.String(255), nullable=True), - sa.Column('accepted', sa.Boolean, default=False), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - # Sqlite needs to handle nullable differently - is_nullable = (meta.bind.name == 'sqlite') - - encryption = sa.Table( - 'encryption', meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), - sa.Column('cipher', sa.String(255)), - sa.Column('control_location', sa.String(255), nullable=is_nullable), - sa.Column('key_size', sa.Integer), - sa.Column('provider', sa.String(255), nullable=is_nullable), - # 
NOTE(joel-coffman): The volume_type_id must be unique or else the - # referenced volume type becomes ambiguous. That is, specifying the - # volume type is not sufficient to identify a particular encryption - # scheme unless each volume type is associated with at most one - # encryption scheme. - sa.Column('volume_type_id', sa.String(36), nullable=is_nullable), - # NOTE (smcginnis): nullable=True triggers this to not set a default - # value, but since it's a primary key the resulting schema will end up - # still being NOT NULL. This is avoiding a case in MySQL where it will - # otherwise set this to NOT NULL DEFAULT ''. May be harmless, but - # inconsistent with previous schema. - sa.Column( - 'encryption_id', - sa.String(36), - primary_key=True, - nullable=True), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - volume_admin_metadata = sa.Table( - 'volume_admin_metadata', meta, - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('deleted_at', sa.DateTime), - sa.Column('deleted', sa.Boolean), - sa.Column('id', sa.Integer, primary_key=True, nullable=False), - sa.Column( - 'volume_id', - sa.String(36), - sa.ForeignKey('volumes.id'), - nullable=False, - index=True), - sa.Column('key', sa.String(255)), - sa.Column('value', sa.String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - initiator_data = sa.Table( - 'driver_initiator_data', meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('id', sa.Integer, primary_key=True, nullable=False), - sa.Column('initiator', sa.String(255), index=True, nullable=False), - sa.Column('namespace', sa.String(255), nullable=False), - sa.Column('key', sa.String(255), nullable=False), - sa.Column('value', sa.String(255)), - sa.UniqueConstraint('initiator', 'namespace', 'key'), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - image_volume_cache = sa.Table( - 'image_volume_cache_entries', meta, - 
sa.Column('image_updated_at', sa.DateTime(timezone=False)), - sa.Column('id', sa.Integer, primary_key=True, nullable=False), - sa.Column('host', sa.String(255), index=True, nullable=False), - sa.Column('image_id', sa.String(36), index=True, nullable=False), - sa.Column('volume_id', sa.String(36), nullable=False), - sa.Column('size', sa.Integer, nullable=False), - sa.Column('last_used', sa.DateTime, nullable=False), - sa.Column('cluster_name', sa.String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - messages = sa.Table( - 'messages', meta, - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('project_id', sa.String(255), nullable=False), - sa.Column('request_id', sa.String(255)), - sa.Column('resource_type', sa.String(36)), - sa.Column('resource_uuid', sa.String(255), nullable=True), - sa.Column('event_id', sa.String(255), nullable=False), - sa.Column('message_level', sa.String(255), nullable=False), - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean), - sa.Column('expires_at', sa.DateTime(timezone=False), index=True), - sa.Column('detail_id', sa.String(10), nullable=True), - sa.Column('action_id', sa.String(10), nullable=True), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - cluster = sa.Table( - 'clusters', meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean(), default=False), - sa.Column('id', sa.Integer, primary_key=True, nullable=False), - sa.Column('name', sa.String(255), nullable=False), - sa.Column('binary', sa.String(255), nullable=False), - sa.Column('disabled', sa.Boolean(), default=False), - sa.Column('disabled_reason', sa.String(255)), - sa.Column('race_preventer', sa.Integer, nullable=False, 
default=0), - sa.Column( - 'replication_status', - sa.String(length=36), - default='not-capable'), - sa.Column('active_backend_id', sa.String(length=255)), - sa.Column( - 'frozen', - sa.Boolean, - nullable=False, - default=False, - server_default=expression.false()), - # To remove potential races on creation we have a constraint set on - # name and race_preventer fields, and we set value on creation to 0, so - # 2 clusters with the same name will fail this constraint. On deletion - # we change this field to the same value as the id which will be unique - # and will not conflict with the creation of another cluster with the - # same name. - sa.UniqueConstraint('name', 'binary', 'race_preventer'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - workers = sa.Table( - 'workers', meta, - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean(), default=False), - sa.Column('id', sa.Integer, primary_key=True), - sa.Column('resource_type', sa.String(40), nullable=False), - sa.Column('resource_id', sa.String(36), nullable=False), - sa.Column('status', sa.String(255), nullable=False), - sa.Column( - 'service_id', - sa.Integer, - sa.ForeignKey('services.id'), - nullable=True, - index=True), - sa.Column( - 'race_preventer', - sa.Integer, - nullable=False, - default=0, - server_default=sa.text('0')), - sa.UniqueConstraint('resource_type', 'resource_id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - group_types = sa.Table( - 'group_types', meta, - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('name', sa.String(255), nullable=False), - sa.Column('description', sa.String(255)), - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean), - 
sa.Column('is_public', sa.Boolean), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - group_type_specs = sa.Table( - 'group_type_specs', meta, - sa.Column('id', sa.Integer, primary_key=True, nullable=False), - sa.Column('key', sa.String(255)), - sa.Column('value', sa.String(255)), - sa.Column( - 'group_type_id', - sa.String(36), - sa.ForeignKey('group_types.id'), - nullable=False, - index=True), - sa.Column('created_at', sa.DateTime(timezone=False)), - sa.Column('updated_at', sa.DateTime(timezone=False)), - sa.Column('deleted_at', sa.DateTime(timezone=False)), - sa.Column('deleted', sa.Boolean), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - group_type_projects = sa.Table( - 'group_type_projects', meta, - sa.Column('id', sa.Integer, primary_key=True, nullable=False), - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('deleted_at', sa.DateTime), - sa.Column( - 'group_type_id', - sa.String(36), - sa.ForeignKey('group_types.id')), - sa.Column('project_id', sa.String(length=255)), - sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), - sa.UniqueConstraint('group_type_id', 'project_id', 'deleted'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - grp_vt_mapping = sa.Table( - 'group_volume_type_mapping', meta, - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('deleted_at', sa.DateTime), - sa.Column('deleted', sa.Boolean), - sa.Column('id', sa.Integer, primary_key=True, nullable=False), - sa.Column( - 'volume_type_id', - sa.String(36), - sa.ForeignKey('volume_types.id'), - nullable=False, - index=True), - sa.Column( - 'group_id', - sa.String(36), - sa.ForeignKey('groups.id'), - nullable=False, - index=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - return [consistencygroups, - cgsnapshots, - groups, - group_snapshots, - services, - volumes, - volume_attachment, - attachment_specs, - snapshots, - snapshot_metadata, - 
quality_of_service_specs, - volume_types, - volume_type_projects, - quotas, - volume_metadata, - volume_type_extra_specs, - quota_classes, - quota_usages, - reservations, - volume_glance_metadata, - backups, - backup_metadata, - transfers, - encryption, - volume_admin_metadata, - initiator_data, - image_volume_cache, - messages, - cluster, - workers, - group_types, - group_type_specs, - group_type_projects, - grp_vt_mapping] - - -def upgrade(migrate_engine): - meta = sa.MetaData() - meta.bind = migrate_engine - - # create all tables - # Take care on create order for those with FK dependencies - tables = define_tables(meta) - - for table in tables: - table.create() - - if migrate_engine.name == "mysql": - tables = ["consistencygroups", - "cgsnapshots", - "snapshots", - "snapshot_metadata", - "quality_of_service_specs", - "volume_types", - "volume_type_projects", - "volumes", - "volume_attachment", - "migrate_version", - "quotas", - "services", - "volume_metadata", - "volume_type_extra_specs", - "quota_classes", - "quota_usages", - "reservations", - "volume_glance_metadata", - "backups", - "backup_metadata", - "transfers", - "encryption", - "volume_admin_metadata", - "driver_initiator_data", - "image_volume_cache_entries"] - - migrate_engine.execute("SET foreign_key_checks = 0") - for table in tables: - migrate_engine.execute( - "ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % table) - migrate_engine.execute("SET foreign_key_checks = 1") - migrate_engine.execute( - "ALTER DATABASE %s DEFAULT CHARACTER SET utf8" % - migrate_engine.url.database) - migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" % table) - - workers = sa.Table('workers', meta, autoload=True) - - # This is only necessary for mysql, and since the table is not in use this - # will only be a schema update. 
- if migrate_engine.name.startswith('mysql'): - try: - workers.c.updated_at.alter(mysql.DATETIME(fsp=6)) - except Exception: - # MySQL v5.5 or earlier don't support sub-second resolution so we - # may have cleanup races in Active-Active configurations, that's - # why upgrading is recommended in that case. - # Code in Cinder is capable of working with 5.5, so for 5.5 there's - # no problem - pass - - quota_usages = sa.Table('quota_usages', meta, autoload=True) - try: - quota_usages.c.resource.alter(type=sa.String(300)) - except Exception: - # On MariaDB, max length varies depending on the version and the InnoDB - # page size [1], so it is possible to have error 1071 ('Specified key - # was too long; max key length is 767 bytes"). Since this migration is - # to resolve a corner case, deployments with those DB versions won't be - # covered. - # [1]: https://mariadb.com/kb/en/library/innodb-limitations/#page-sizes - if not migrate_engine.name.startswith('mysql'): - raise - - # Set default quota class values - quota_classes = sa.Table('quota_classes', meta, autoload=True) - qci = quota_classes.insert() - qci.execute({'created_at': CREATED_AT, - 'class_name': CLASS_NAME, - 'resource': 'volumes', - 'hard_limit': CONF.quota_volumes, - 'deleted': False, }) - # Set default snapshots - qci.execute({'created_at': CREATED_AT, - 'class_name': CLASS_NAME, - 'resource': 'snapshots', - 'hard_limit': CONF.quota_snapshots, - 'deleted': False, }) - # Set default gigabytes - qci.execute({'created_at': CREATED_AT, - 'class_name': CLASS_NAME, - 'resource': 'gigabytes', - 'hard_limit': CONF.quota_gigabytes, - 'deleted': False, }) - qci.execute({'created_at': CREATED_AT, - 'class_name': CLASS_NAME, - 'resource': 'consistencygroups', - 'hard_limit': CONF.quota_consistencygroups, - 'deleted': False, }) - qci.execute({'created_at': CREATED_AT, - 'class_name': CLASS_NAME, - 'resource': 'per_volume_gigabytes', - 'hard_limit': -1, - 'deleted': False, }) - qci.execute({'created_at': CREATED_AT, - 
'class_name': CLASS_NAME, - 'resource': 'groups', - 'hard_limit': CONF.quota_groups, - 'deleted': False, }) - - # TODO(geguileo): Once we remove support for MySQL 5.5 we have to create - # an upgrade migration to remove this row. - # Set workers table sub-second support sentinel - workers = sa.Table('workers', meta, autoload=True) - wi = workers.insert() - now = timeutils.utcnow().replace(microsecond=123) - wi.execute({'created_at': now, - 'updated_at': now, - 'deleted': False, - 'resource_type': 'SENTINEL', - 'resource_id': 'SUB-SECOND', - 'status': 'OK'}) - - # Create default group type - group_types = sa.Table('group_types', meta, autoload=True) - group_type_specs = sa.Table('group_type_specs', meta, autoload=True) - - now = timeutils.utcnow() - grp_type_id = "%s" % uuid.uuid4() - group_type_dicts = { - 'id': grp_type_id, - 'name': volume_group_types.DEFAULT_CGSNAPSHOT_TYPE, - 'description': 'Default group type for migrating cgsnapshot', - 'created_at': now, - 'updated_at': now, - 'deleted': False, - 'is_public': True, - } - grp_type = group_types.insert() - grp_type.execute(group_type_dicts) - - group_spec_dicts = { - 'key': 'consistent_group_snapshot_enabled', - 'value': ' True', - 'group_type_id': grp_type_id, - 'created_at': now, - 'updated_at': now, - 'deleted': False, - } - grp_spec = group_type_specs.insert() - grp_spec.execute(group_spec_dicts) - - # Increase the resource column size to the quota_usages table. - # - # The resource value is constructed from (prefix + volume_type_name), - # but the length of volume_type_name is limited to 255, if we add a - # prefix such as 'volumes_' or 'gigabytes_' to volume_type_name it - # will exceed the db length limit. 
- - # Create default volume type - vol_types = sa.Table("volume_types", meta, autoload=True) - volume_type_dict = { - 'id': str(uuid.uuid4()), - 'name': volume_types.DEFAULT_VOLUME_TYPE, - 'description': 'Default Volume Type', - 'created_at': now, - 'updated_at': now, - 'deleted': False, - 'is_public': True, - } - vol_type = vol_types.insert() - vol_type.execute(volume_type_dict) diff --git a/cinder/db/legacy_migrations/versions/136_make_vol_type_col_non_nullable.py b/cinder/db/legacy_migrations/versions/136_make_vol_type_col_non_nullable.py deleted file mode 100644 index 67cbb4c4aa1..00000000000 --- a/cinder/db/legacy_migrations/versions/136_make_vol_type_col_non_nullable.py +++ /dev/null @@ -1,52 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy as sa - -from cinder import exception -from cinder.i18n import _ - - -def upgrade(migrate_engine): - """Make volume_type columns non-nullable""" - - meta = sa.MetaData(bind=migrate_engine) - - # Update volume_type columns in tables to not allow null value - - volumes = sa.Table('volumes', meta, autoload=True) - - try: - volumes.c.volume_type_id.alter(nullable=False) - except Exception: - msg = (_('Migration cannot continue until all volumes have ' - 'been migrated to the `__DEFAULT__` volume type. Please ' - 'run `cinder-manage db online_data_migrations`. 
' - 'There are still untyped volumes unmigrated.')) - raise exception.ValidationError(msg) - - snapshots = sa.Table('snapshots', meta, autoload=True) - - try: - snapshots.c.volume_type_id.alter(nullable=False) - except Exception: - msg = (_('Migration cannot continue until all snapshots have ' - 'been migrated to the `__DEFAULT__` volume type. Please ' - 'run `cinder-manage db online_data_migrations`.' - 'There are still %(count)i untyped snapshots unmigrated.')) - raise exception.ValidationError(msg) - - encryption = sa.Table('encryption', meta, autoload=True) - # since volume_type is a mandatory arg when creating encryption - # volume_type_id column won't contain any null values so we can directly - # alter it - encryption.c.volume_type_id.alter(nullable=False) diff --git a/cinder/db/legacy_migrations/versions/137_placeholder.py b/cinder/db/legacy_migrations/versions/137_placeholder.py deleted file mode 100644 index fbc885b5755..00000000000 --- a/cinder/db/legacy_migrations/versions/137_placeholder.py +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is a placeholder for Ussuri backports. -# Do not use this number for new Victoria work. New work starts after all the -# placeholders. 
-# -# See this for more information: -# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html - - -def upgrade(migrate_engine): - pass diff --git a/cinder/db/legacy_migrations/versions/138_placeholder.py b/cinder/db/legacy_migrations/versions/138_placeholder.py deleted file mode 100644 index fbc885b5755..00000000000 --- a/cinder/db/legacy_migrations/versions/138_placeholder.py +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is a placeholder for Ussuri backports. -# Do not use this number for new Victoria work. New work starts after all the -# placeholders. -# -# See this for more information: -# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html - - -def upgrade(migrate_engine): - pass diff --git a/cinder/db/legacy_migrations/versions/139_placeholder.py b/cinder/db/legacy_migrations/versions/139_placeholder.py deleted file mode 100644 index fbc885b5755..00000000000 --- a/cinder/db/legacy_migrations/versions/139_placeholder.py +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is a placeholder for Ussuri backports. -# Do not use this number for new Victoria work. New work starts after all the -# placeholders. -# -# See this for more information: -# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html - - -def upgrade(migrate_engine): - pass diff --git a/cinder/db/legacy_migrations/versions/140_create_project_default_volume_type.py b/cinder/db/legacy_migrations/versions/140_create_project_default_volume_type.py deleted file mode 100644 index 1d17bfecedf..00000000000 --- a/cinder/db/legacy_migrations/versions/140_create_project_default_volume_type.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy as sa - - -def upgrade(migrate_engine): - meta = sa.MetaData() - meta.bind = migrate_engine - - # This is required to establish foreign key dependency between - # volume_type_id and volume_types.id columns. 
See L#34-35 - sa.Table('volume_types', meta, autoload=True) - - default_volume_types = sa.Table( - 'default_volume_types', meta, - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('deleted_at', sa.DateTime), - sa.Column( - 'volume_type_id', - sa.String(36), - sa.ForeignKey('volume_types.id'), - index=True), - sa.Column( - 'project_id', - sa.String(length=255), - primary_key=True, - nullable=False), - sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - try: - default_volume_types.create() - except Exception: - raise diff --git a/cinder/db/legacy_migrations/versions/141_add_quota_usage_unique_constraint.py b/cinder/db/legacy_migrations/versions/141_add_quota_usage_unique_constraint.py deleted file mode 100644 index e48ec2ac203..00000000000 --- a/cinder/db/legacy_migrations/versions/141_add_quota_usage_unique_constraint.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from migrate.changeset import constraint -import sqlalchemy as sa - - -def upgrade(migrate_engine): - """Update quota_usages table to prevent races on creation. - - Add race_preventer field and a unique constraint to prevent quota usage - duplicates and races that mess the quota system when first creating rows. 
- """ - # There's no need to set the race_preventer field for existing DB entries, - # since the race we want to prevent is only on creation. - meta = sa.MetaData(bind=migrate_engine) - quota_usages = sa.Table('quota_usages', meta, autoload=True) - - if not hasattr(quota_usages.c, 'race_preventer'): - quota_usages.create_column( - sa.Column('race_preventer', sa.Boolean, nullable=True)) - - unique = constraint.UniqueConstraint( - 'project_id', 'resource', 'race_preventer', - table=quota_usages) - unique.create(engine=migrate_engine) diff --git a/cinder/db/legacy_migrations/versions/142_placeholder.py b/cinder/db/legacy_migrations/versions/142_placeholder.py deleted file mode 100644 index 4034bb5e76f..00000000000 --- a/cinder/db/legacy_migrations/versions/142_placeholder.py +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is a placeholder for Wallaby backports. -# Do not use this number for new Xena work. New work starts after all the -# placeholders. 
-# -# See this for more information: -# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html - - -def upgrade(migrate_engine): - pass diff --git a/cinder/db/legacy_migrations/versions/143_placeholder.py b/cinder/db/legacy_migrations/versions/143_placeholder.py deleted file mode 100644 index 4034bb5e76f..00000000000 --- a/cinder/db/legacy_migrations/versions/143_placeholder.py +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is a placeholder for Wallaby backports. -# Do not use this number for new Xena work. New work starts after all the -# placeholders. -# -# See this for more information: -# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html - - -def upgrade(migrate_engine): - pass diff --git a/cinder/db/legacy_migrations/versions/144_placeholder.py b/cinder/db/legacy_migrations/versions/144_placeholder.py deleted file mode 100644 index 4034bb5e76f..00000000000 --- a/cinder/db/legacy_migrations/versions/144_placeholder.py +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is a placeholder for Wallaby backports. -# Do not use this number for new Xena work. New work starts after all the -# placeholders. -# -# See this for more information: -# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html - - -def upgrade(migrate_engine): - pass diff --git a/cinder/db/legacy_migrations/versions/145_add_use_quota_fields.py b/cinder/db/legacy_migrations/versions/145_add_use_quota_fields.py deleted file mode 100644 index 48a4b5bd0e9..00000000000 --- a/cinder/db/legacy_migrations/versions/145_add_use_quota_fields.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy as sa - - -def upgrade(migrate_engine): - """Update volumes and snapshots tables with use_quota field. - - Add use_quota field to both volumes and snapshots table to fast and easily - identify resources that must be counted for quota usages. 
- """ - # Existing resources will be left with None value to allow rolling upgrades - # with the online data migration pattern, since they will identify the - # resources that don't have the field set/known yet. - meta = sa.MetaData(bind=migrate_engine) - for table_name in ('volumes', 'snapshots'): - table = sa.Table(table_name, meta, autoload=True) - - if not hasattr(table.c, 'use_quota'): - column = sa.Column('use_quota', sa.Boolean, nullable=True) - table.create_column(column) diff --git a/cinder/db/legacy_migrations/versions/__init__.py b/cinder/db/legacy_migrations/versions/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/cinder/db/migration.py b/cinder/db/migration.py index 1532ba6760d..ed7bdb22cd6 100644 --- a/cinder/db/migration.py +++ b/cinder/db/migration.py @@ -21,9 +21,6 @@ import os from alembic import command as alembic_api from alembic import config as alembic_config from alembic import migration as alembic_migration -from migrate import exceptions as migrate_exceptions -from migrate.versioning import api as migrate_api -from migrate.versioning import repository as migrate_repo from oslo_config import cfg from oslo_db import options from oslo_log import log as logging @@ -34,20 +31,6 @@ options.set_defaults(cfg.CONF) LOG = logging.getLogger(__name__) -MIGRATE_INIT_VERSION = 134 -ALEMBIC_INIT_VERSION = '921e1a36b076' - - -def _find_migrate_repo(): - """Get the project's change script repository - - :returns: An instance of ``migrate.versioning.repository.Repository`` - """ - path = os.path.join( - os.path.abspath(os.path.dirname(__file__)), 'legacy_migrations', - ) - return migrate_repo.Repository(path) - def _find_alembic_conf(): """Get the project's alembic configuration @@ -66,35 +49,6 @@ def _find_alembic_conf(): return config -def _is_database_under_migrate_control(engine, repository): - try: - migrate_api.db_version(engine, repository) - return True - except migrate_exceptions.DatabaseNotControlledError: - return 
False - - -def _is_database_under_alembic_control(engine): - with engine.connect() as conn: - context = alembic_migration.MigrationContext.configure(conn) - return bool(context.get_current_revision()) - - -def _init_alembic_on_legacy_database(engine, repository, config): - """Init alembic in an existing environment with sqlalchemy-migrate.""" - LOG.info( - 'The database is still under sqlalchemy-migrate control; ' - 'applying any remaining sqlalchemy-migrate-based migrations ' - 'and fake applying the initial alembic migration' - ) - migrate_api.upgrade(engine, repository) - - # re-use the connection rather than creating a new one - with engine.begin() as connection: - config.attributes['connection'] = connection - alembic_api.stamp(config, ALEMBIC_INIT_VERSION) - - def _upgrade_alembic(engine, config, version): # re-use the connection rather than creating a new one with engine.begin() as connection: @@ -104,20 +58,11 @@ def _upgrade_alembic(engine, config, version): def db_version(): """Get database version.""" - repository = _find_migrate_repo() engine = db_api.get_engine() - migrate_version = None - if _is_database_under_migrate_control(engine, repository): - migrate_version = migrate_api.db_version(engine, repository) - - alembic_version = None - if _is_database_under_alembic_control(engine): - with engine.connect() as conn: - m_context = alembic_migration.MigrationContext.configure(conn) - alembic_version = m_context.get_current_revision() - - return alembic_version or migrate_version + with engine.connect() as conn: + m_context = alembic_migration.MigrationContext.configure(conn) + return m_context.get_current_revision() def db_sync(version=None, engine=None): @@ -140,7 +85,6 @@ def db_sync(version=None, engine=None): if engine is None: engine = db_api.get_engine() - repository = _find_migrate_repo() config = _find_alembic_conf() # discard the URL encoded in alembic.ini in favour of the URL configured @@ -155,17 +99,6 @@ def db_sync(version=None, 
engine=None): engine_url = str(engine.url).replace('%', '%%') config.set_main_option('sqlalchemy.url', str(engine_url)) - # if we're in a deployment where sqlalchemy-migrate is already present, - # then apply all the updates for that and fake apply the initial alembic - # migration; if we're not then 'upgrade' will take care of everything - # this should be a one-time operation - if ( - _is_database_under_migrate_control(engine, repository) and - not _is_database_under_alembic_control(engine) - ): - _init_alembic_on_legacy_database(engine, repository, config) - - # apply anything later LOG.info('Applying migration(s)') _upgrade_alembic(engine, config, version) LOG.info('Migration(s) applied') diff --git a/cinder/tests/unit/db/test_migration.py b/cinder/tests/unit/db/test_migration.py index 392d79bb7ac..317864f08f3 100644 --- a/cinder/tests/unit/db/test_migration.py +++ b/cinder/tests/unit/db/test_migration.py @@ -13,8 +13,6 @@ from unittest import mock from alembic.runtime import migration as alembic_migration -from migrate import exceptions as migrate_exceptions -from migrate.versioning import api as migrate_api from oslotest import base as test_base from cinder.db import migration @@ -28,161 +26,32 @@ class TestDBSync(test_base.BaseTestCase): self.assertRaises(ValueError, migration.db_sync, '402') @mock.patch.object(migration, '_upgrade_alembic') - @mock.patch.object(migration, '_init_alembic_on_legacy_database') - @mock.patch.object(migration, '_is_database_under_alembic_control') - @mock.patch.object(migration, '_is_database_under_migrate_control') @mock.patch.object(migration, '_find_alembic_conf') - @mock.patch.object(migration, '_find_migrate_repo') @mock.patch.object(db_api, 'get_engine') - def _test_db_sync( - self, has_migrate, has_alembic, mock_get_engine, mock_find_repo, - mock_find_conf, mock_is_migrate, mock_is_alembic, mock_init, - mock_upgrade, - ): - mock_is_migrate.return_value = has_migrate - mock_is_alembic.return_value = has_alembic + def 
test_db_sync(self, mock_get_engine, mock_find_conf, mock_upgrade): migration.db_sync() mock_get_engine.assert_called_once_with() - mock_find_repo.assert_called_once_with() mock_find_conf.assert_called_once_with() mock_find_conf.return_value.set_main_option.assert_called_once_with( 'sqlalchemy.url', str(mock_get_engine.return_value.url), ) - mock_is_migrate.assert_called_once_with( - mock_get_engine.return_value, mock_find_repo.return_value) - if has_migrate: - mock_is_alembic.assert_called_once_with( - mock_get_engine.return_value) - else: - mock_is_alembic.assert_not_called() - # we should only attempt the upgrade of the remaining - # sqlalchemy-migrate-based migrations and fake apply of the initial - # alembic migrations if sqlalchemy-migrate is in place but alembic - # hasn't been used yet - if has_migrate and not has_alembic: - mock_init.assert_called_once_with( - mock_get_engine.return_value, - mock_find_repo.return_value, mock_find_conf.return_value) - else: - mock_init.assert_not_called() - - # however, we should always attempt to upgrade the requested migration - # to alembic mock_upgrade.assert_called_once_with( - mock_get_engine.return_value, mock_find_conf.return_value, None) - - def test_db_sync_new_deployment(self): - """Mimic a new deployment without existing sqlalchemy-migrate cruft.""" - has_migrate = False - has_alembic = False - self._test_db_sync(has_migrate, has_alembic) - - def test_db_sync_with_existing_migrate_database(self): - """Mimic a deployment currently managed by sqlalchemy-migrate.""" - has_migrate = True - has_alembic = False - self._test_db_sync(has_migrate, has_alembic) - - def test_db_sync_with_existing_alembic_database(self): - """Mimic a deployment that's already switched to alembic.""" - has_migrate = True - has_alembic = True - self._test_db_sync(has_migrate, has_alembic) + mock_get_engine.return_value, mock_find_conf.return_value, None, + ) @mock.patch.object(alembic_migration.MigrationContext, 'configure') 
-@mock.patch.object(migrate_api, 'db_version') -@mock.patch.object(migration, '_is_database_under_alembic_control') -@mock.patch.object(migration, '_is_database_under_migrate_control') @mock.patch.object(db_api, 'get_engine') -@mock.patch.object(migration, '_find_migrate_repo') class TestDBVersion(test_base.BaseTestCase): - def test_db_version_migrate( - self, mock_find_repo, mock_get_engine, mock_is_migrate, - mock_is_alembic, mock_migrate_version, mock_m_context_configure, - ): - """Database is controlled by sqlalchemy-migrate.""" - mock_is_migrate.return_value = True - mock_is_alembic.return_value = False - ret = migration.db_version() - self.assertEqual(mock_migrate_version.return_value, ret) - mock_find_repo.assert_called_once_with() - mock_get_engine.assert_called_once_with() - mock_is_migrate.assert_called_once() - mock_is_alembic.assert_called_once() - mock_migrate_version.assert_called_once_with( - mock_get_engine.return_value, mock_find_repo.return_value) - mock_m_context_configure.assert_not_called() - - def test_db_version_alembic( - self, mock_find_repo, mock_get_engine, mock_is_migrate, - mock_is_alembic, mock_migrate_version, mock_m_context_configure, - ): + def test_db_version(self, mock_get_engine, mock_m_context_configure): """Database is controlled by alembic.""" - mock_is_migrate.return_value = False - mock_is_alembic.return_value = True ret = migration.db_version() mock_m_context = mock_m_context_configure.return_value self.assertEqual( mock_m_context.get_current_revision.return_value, ret, ) - mock_find_repo.assert_called_once_with() mock_get_engine.assert_called_once_with() - mock_is_migrate.assert_called_once() - mock_is_alembic.assert_called_once() - mock_migrate_version.assert_not_called() mock_m_context_configure.assert_called_once() - - def test_db_version_not_controlled( - self, mock_find_repo, mock_get_engine, mock_is_migrate, - mock_is_alembic, mock_migrate_version, mock_m_context_configure, - ): - """Database is not controlled.""" - 
mock_is_migrate.return_value = False - mock_is_alembic.return_value = False - ret = migration.db_version() - self.assertIsNone(ret) - mock_find_repo.assert_called_once_with() - mock_get_engine.assert_called_once_with() - mock_is_migrate.assert_called_once() - mock_is_alembic.assert_called_once() - mock_migrate_version.assert_not_called() - mock_m_context_configure.assert_not_called() - - -class TestDatabaseUnderVersionControl(test_base.BaseTestCase): - - @mock.patch.object(migrate_api, 'db_version') - def test__is_database_under_migrate_control__true(self, mock_db_version): - ret = migration._is_database_under_migrate_control('engine', 'repo') - self.assertTrue(ret) - mock_db_version.assert_called_once_with('engine', 'repo') - - @mock.patch.object(migrate_api, 'db_version') - def test__is_database_under_migrate_control__false(self, mock_db_version): - mock_db_version.side_effect = \ - migrate_exceptions.DatabaseNotControlledError() - ret = migration._is_database_under_migrate_control('engine', 'repo') - self.assertFalse(ret) - mock_db_version.assert_called_once_with('engine', 'repo') - - @mock.patch.object(alembic_migration.MigrationContext, 'configure') - def test__is_database_under_alembic_control__true(self, mock_configure): - context = mock_configure.return_value - context.get_current_revision.return_value = 'foo' - engine = mock.MagicMock() - ret = migration._is_database_under_alembic_control(engine) - self.assertTrue(ret) - context.get_current_revision.assert_called_once_with() - - @mock.patch.object(alembic_migration.MigrationContext, 'configure') - def test__is_database_under_alembic_control__false(self, mock_configure): - context = mock_configure.return_value - context.get_current_revision.return_value = None - engine = mock.MagicMock() - ret = migration._is_database_under_alembic_control(engine) - self.assertFalse(ret) - context.get_current_revision.assert_called_once_with() diff --git a/cinder/tests/unit/db/test_migrations.py 
b/cinder/tests/unit/db/test_migrations.py index 3c8ed6dbdb7..23fe49e7644 100644 --- a/cinder/tests/unit/db/test_migrations.py +++ b/cinder/tests/unit/db/test_migrations.py @@ -16,13 +16,9 @@ the test case runs a series of test cases to ensure that migrations work properly and that no data loss occurs if possible. """ -import os - from alembic import command as alembic_api from alembic import script as alembic_script import fixtures -from migrate.versioning import api as migrate_api -from migrate.versioning import repository from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import test_fixtures from oslo_db.sqlalchemy import test_migrations @@ -30,15 +26,11 @@ from oslo_db.sqlalchemy import utils as db_utils from oslo_log.fixture import logging_error as log_fixture from oslotest import base as test_base import sqlalchemy -from sqlalchemy.engine import reflection -import cinder.db.legacy_migrations from cinder.db import migration from cinder.db.sqlalchemy import api from cinder.db.sqlalchemy import models from cinder.tests import fixtures as cinder_fixtures -from cinder.tests.unit import utils as test_utils -from cinder.volume import volume_types class CinderModelsMigrationsSync(test_migrations.ModelsMigrationsSync): @@ -144,7 +136,7 @@ class MigrationsWalk( self.engine = enginefacade.writer.get_engine() self.patch(api, 'get_engine', lambda: self.engine) self.config = migration._find_alembic_conf() - self.init_version = migration.ALEMBIC_INIT_VERSION + self.init_version = '921e1a36b076' def _migrate_up(self, revision, connection): check_method = getattr(self, f'_check_{revision}', None) @@ -250,249 +242,3 @@ class TestMigrationsWalkPostgreSQL( test_base.BaseTestCase, ): FIXTURE = test_fixtures.PostgresqlOpportunisticFixture - - -class LegacyMigrationsWalk(test_migrations.WalkVersionsMixin): - """Test sqlalchemy-migrate migrations.""" - - BOOL_TYPE = sqlalchemy.types.BOOLEAN - TIME_TYPE = sqlalchemy.types.DATETIME - INTEGER_TYPE = 
sqlalchemy.types.INTEGER - VARCHAR_TYPE = sqlalchemy.types.VARCHAR - TEXT_TYPE = sqlalchemy.types.Text - - def setUp(self): - super().setUp() - self.engine = enginefacade.writer.get_engine() - - @property - def INIT_VERSION(self): - return migration.MIGRATE_INIT_VERSION - - @property - def REPOSITORY(self): - migrate_file = cinder.db.legacy_migrations.__file__ - return repository.Repository( - os.path.abspath(os.path.dirname(migrate_file))) - - @property - def migration_api(self): - return migrate_api - - @property - def migrate_engine(self): - return self.engine - - def get_table_ref(self, engine, name, metadata): - metadata.bind = engine - return sqlalchemy.Table(name, metadata, autoload=True) - - class BannedDBSchemaOperations(fixtures.Fixture): - """Ban some operations for migrations""" - def __init__(self, banned_resources=None): - super().__init__() - self._banned_resources = banned_resources or [] - - @staticmethod - def _explode(resource, op): - print('%s.%s()' % (resource, op)) # noqa - raise Exception( - 'Operation %s.%s() is not allowed in a database migration' % ( - resource, op)) - - def setUp(self): - super().setUp() - for thing in self._banned_resources: - self.useFixture(fixtures.MonkeyPatch( - 'sqlalchemy.%s.drop' % thing, - lambda *a, **k: self._explode(thing, 'drop'))) - self.useFixture(fixtures.MonkeyPatch( - 'sqlalchemy.%s.alter' % thing, - lambda *a, **k: self._explode(thing, 'alter'))) - - def migrate_up(self, version, with_data=False): - # NOTE(dulek): This is a list of migrations where we allow dropping - # things. The rules for adding things here are very very specific. - # Insight on how to drop things from the DB in a backward-compatible - # manner is provided in Cinder's developer documentation. - # Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE WITHOUT CARE - exceptions = [ - # NOTE(brinzhang): 135 changes size of quota_usage.resource - # to 300. 
This should be safe for the 'quota_usage' db table, - # because of the 255 is the length limit of volume_type_name, - # it should be add the additional prefix before volume_type_name, - # which we of course allow *this* size to 300. - 135, - # 136 modifies the the tables having volume_type_id field to set - # as non nullable - 136, - ] - - if version not in exceptions: - banned = ['Table', 'Column'] - else: - banned = None - - with LegacyMigrationsWalk.BannedDBSchemaOperations(banned): - super().migrate_up(version, with_data) - - def __check_cinderbase_fields(self, columns): - """Check fields inherited from CinderBase ORM class.""" - self.assertIsInstance(columns.created_at.type, self.TIME_TYPE) - self.assertIsInstance(columns.updated_at.type, self.TIME_TYPE) - self.assertIsInstance(columns.deleted_at.type, self.TIME_TYPE) - self.assertIsInstance(columns.deleted.type, self.BOOL_TYPE) - - def get_table_names(self, engine): - inspector = reflection.Inspector.from_engine(engine) - return inspector.get_table_names() - - def get_foreign_key_columns(self, engine, table_name): - foreign_keys = set() - table = db_utils.get_table(engine, table_name) - inspector = reflection.Inspector.from_engine(engine) - for column_dict in inspector.get_columns(table_name): - column_name = column_dict['name'] - column = getattr(table.c, column_name) - if column.foreign_keys: - foreign_keys.add(column_name) - return foreign_keys - - def get_indexed_columns(self, engine, table_name): - indexed_columns = set() - for index in db_utils.get_indexes(engine, table_name): - for column_name in index['column_names']: - indexed_columns.add(column_name) - return indexed_columns - - def assert_each_foreign_key_is_part_of_an_index(self): - engine = self.migrate_engine - - non_indexed_foreign_keys = set() - - for table_name in self.get_table_names(engine): - indexed_columns = self.get_indexed_columns(engine, table_name) - foreign_key_columns = self.get_foreign_key_columns( - engine, table_name - ) - for 
column_name in foreign_key_columns - indexed_columns: - non_indexed_foreign_keys.add(table_name + '.' + column_name) - - self.assertSetEqual(set(), non_indexed_foreign_keys) - - def _check_127(self, engine, data): - quota_usage_resource = db_utils.get_table(engine, 'quota_usages') - self.assertIn('resource', quota_usage_resource.c) - self.assertIsInstance(quota_usage_resource.c.resource.type, - self.VARCHAR_TYPE) - self.assertEqual(300, quota_usage_resource.c.resource.type.length) - - def _check_128(self, engine, data): - volume_transfer = db_utils.get_table(engine, 'transfers') - self.assertIn('source_project_id', volume_transfer.c) - self.assertIn('destination_project_id', volume_transfer.c) - self.assertIn('accepted', volume_transfer.c) - - def _check_132(self, engine, data): - """Test create default volume type.""" - vol_types = db_utils.get_table(engine, 'volume_types') - vtype = (vol_types.select(vol_types.c.name == - volume_types.DEFAULT_VOLUME_TYPE) - .execute().first()) - self.assertIsNotNone(vtype) - - def _check_136(self, engine, data): - """Test alter volume_type_id columns.""" - vol_table = db_utils.get_table(engine, 'volumes') - snap_table = db_utils.get_table(engine, 'snapshots') - encrypt_table = db_utils.get_table(engine, 'encryption') - self.assertFalse(vol_table.c.volume_type_id.nullable) - self.assertFalse(snap_table.c.volume_type_id.nullable) - self.assertFalse(encrypt_table.c.volume_type_id.nullable) - - def _check_145(self, engine, data): - """Test add use_quota columns.""" - for name in ('volumes', 'snapshots'): - resources = db_utils.get_table(engine, name) - self.assertIn('use_quota', resources.c) - # TODO: (Y release) Alter in new migration & change to assertFalse - self.assertTrue(resources.c.use_quota.nullable) - - # NOTE: this test becomes slower with each addition of new DB migration. - # 'pymysql' works much slower on slow nodes than 'psycopg2'. And such - # timeout mostly required for testing of 'mysql' backend. 
- @test_utils.set_timeout(300) - def test_walk_versions(self): - self.walk_versions(False, False) - self.assert_each_foreign_key_is_part_of_an_index() - - -class TestLegacyMigrationsWalkSQLite( - test_fixtures.OpportunisticDBTestMixin, - LegacyMigrationsWalk, - test_base.BaseTestCase, -): - - def assert_each_foreign_key_is_part_of_an_index(self): - # Skip the test for SQLite because SQLite does not list - # UniqueConstraints as indexes, which makes this test fail. - # Given that SQLite is only for testing purposes, it is safe to skip - pass - - -class TestLegacyMigrationsWalkMySQL( - test_fixtures.OpportunisticDBTestMixin, - LegacyMigrationsWalk, - test_base.BaseTestCase, -): - - FIXTURE = test_fixtures.MySQLOpportunisticFixture - BOOL_TYPE = sqlalchemy.dialects.mysql.TINYINT - - @test_utils.set_timeout(300) - def test_mysql_innodb(self): - """Test that table creation on mysql only builds InnoDB tables.""" - # add this to the global lists to make reset work with it, it's removed - # automatically in tearDown so no need to clean it up here. - # sanity check - repo = migration._find_migrate_repo() - migrate_api.version_control( - self.migrate_engine, repo, migration.MIGRATE_INIT_VERSION) - migrate_api.upgrade(self.migrate_engine, repo) - - total = self.migrate_engine.execute( - "SELECT count(*) " - "from information_schema.TABLES " - "where TABLE_SCHEMA='{0}'".format( - self.migrate_engine.url.database)) - self.assertGreater(total.scalar(), 0, - msg="No tables found. 
Wrong schema?") - - noninnodb = self.migrate_engine.execute( - "SELECT count(*) " - "from information_schema.TABLES " - "where TABLE_SCHEMA='openstack_citest' " - "and ENGINE!='InnoDB' " - "and TABLE_NAME!='migrate_version'") - count = noninnodb.scalar() - self.assertEqual(count, 0, "%d non InnoDB tables created" % count) - - def _check_127(self, engine, data): - quota_usage_resource = db_utils.get_table(engine, 'quota_usages') - self.assertIn('resource', quota_usage_resource.c) - self.assertIsInstance(quota_usage_resource.c.resource.type, - self.VARCHAR_TYPE) - # Depending on the MariaDB version, and the page size, we may not have - # been able to change quota_usage_resource to 300 chars, it could still - # be 255. - self.assertIn(quota_usage_resource.c.resource.type.length, (255, 300)) - - -class TestLegacyMigrationsWalkPostgreSQL( - test_fixtures.OpportunisticDBTestMixin, - LegacyMigrationsWalk, - test_base.BaseTestCase, -): - - FIXTURE = test_fixtures.PostgresqlOpportunisticFixture - TIME_TYPE = sqlalchemy.types.TIMESTAMP diff --git a/doc/source/conf.py b/doc/source/conf.py index c311f4e9166..a80560e3813 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -55,8 +55,6 @@ apidoc_output_dir = 'contributor/api' apidoc_excluded_paths = [ 'tests/*', 'tests', - 'db/legacy_migrations/*', - 'db/legacy_migrations', 'db/migrations/*', 'db/migrations', 'db/sqlalchemy/*', diff --git a/doc/source/contributor/database-migrations.rst b/doc/source/contributor/database-migrations.rst index f9332295bd1..246bcb55063 100644 --- a/doc/source/contributor/database-migrations.rst +++ b/doc/source/contributor/database-migrations.rst @@ -18,11 +18,15 @@ migrations. Schema migrations ----------------- -.. versionchanged:: 24.0.0 (Xena) +.. versionchanged:: 19.0.0 (Xena) The database migration engine was changed from ``sqlalchemy-migrate`` to ``alembic``. +.. versionchanged:: 22.0.0 (Antelope) + + The legacy ``sqlalchemy-migrate``-based database migrations were removed. 
+ The `alembic`__ database migration tool is used to manage schema migrations in cinder. The migration files and related metadata can be found in ``cinder/db/migrations``. As discussed in :doc:`/admin/upgrades`, these can be @@ -32,10 +36,10 @@ run by end users using the :program:`cinder-manage db sync` command. .. note:: - There are also legacy migrations provided in the - ``cinder/db/legacy_migrations`` directory . These are provided to facilitate - upgrades from pre-Xena (24.0.0) deployments and will be removed in a future - release. They should not be modified or extended. + There were also legacy migrations provided in the + ``cinder/db/legacy_migrations`` directory. These were provided to facilitate + upgrades from pre-Xena (19.0.0) deployments. They were removed in the + 22.0.0 (Antelope) release. The best reference for alembic is the `alembic documentation`__, but a small example is provided here. You can create the migration either manually or diff --git a/releasenotes/notes/remove-sqlalchemy-migrate-c62b541fd5f4ab10.yaml b/releasenotes/notes/remove-sqlalchemy-migrate-c62b541fd5f4ab10.yaml new file mode 100644 index 00000000000..8606c8de45b --- /dev/null +++ b/releasenotes/notes/remove-sqlalchemy-migrate-c62b541fd5f4ab10.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + The legacy ``sqlalchemy-migrate`` migrations, which have been deprecated + since Xena, have been removed. There should be no end-user impact. diff --git a/requirements.txt b/requirements.txt index a7087642920..f269b5d3aa4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -49,7 +49,6 @@ taskflow>=4.5.0 # Apache-2.0 rtslib-fb>=2.1.74 # Apache-2.0 six>=1.15.0 # MIT SQLAlchemy>=1.4.23 # MIT -sqlalchemy-migrate>=0.13.0 # Apache-2.0 stevedore>=3.2.2 # Apache-2.0 tabulate>=0.8.7 # MIT tenacity>=6.3.1 # Apache-2.0