Compact Newton database migrations

This compacts all database migrations up to Newton into one
initial schema, removing the need to apply every intermediate
database change along the way.

Change-Id: I7b5833296292df2e6cc7d8d9306115e590fff25a
Sean McGinnis 2017-09-25 17:41:45 -05:00
parent 7aca716a0f
commit a9afbddd11
16 changed files with 219 additions and 1488 deletions


@@ -26,7 +26,7 @@ from stevedore import driver
from cinder.db.sqlalchemy import api as db_api
INIT_VERSION = 72
INIT_VERSION = 84
_IMPL = None
_LOCK = threading.Lock()
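The hunk above bumps INIT_VERSION from 72 to 84, the baseline that the
migration machinery treats as already applied. A minimal sketch of that
gating logic (pending_migrations is a hypothetical helper, not part of
this commit):

def pending_migrations(all_versions, current_version, init_version=84):
    """Return the migration numbers that still need to run.

    Scripts numbered at or below init_version are folded into the
    compacted initial schema and are never applied individually.
    """
    baseline = max(current_version, init_version)
    return [v for v in sorted(all_versions) if v > baseline]

# A fresh deployment starts from the compacted schema and runs only 85+.
assert pending_migrations([73, 85, 86, 87], current_version=0) == [85, 86, 87]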


@@ -1,41 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import MetaData, String, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# New table
messages = Table(
'messages',
meta,
Column('id', String(36), primary_key=True, nullable=False),
Column('project_id', String(36), nullable=False),
Column('request_id', String(255), nullable=False),
Column('resource_type', String(36)),
Column('resource_uuid', String(255), nullable=True),
Column('event_id', String(255), nullable=False),
Column('message_level', String(255), nullable=False),
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean),
Column('expires_at', DateTime(timezone=False)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
messages.create()


@@ -1,58 +0,0 @@
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import MetaData, String, Table, UniqueConstraint
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# New cluster table
cluster = Table(
'clusters', meta,
# Inherited fields from CinderBase
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(), default=False),
# Cluster specific fields
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(255), nullable=False),
Column('binary', String(255), nullable=False),
Column('disabled', Boolean(), default=False),
Column('disabled_reason', String(255)),
Column('race_preventer', Integer, nullable=False, default=0),
# To remove potential races on creation there is a unique constraint
# on the name, binary, and race_preventer fields. race_preventer is 0
# on creation, so two live clusters with the same name fail the
# constraint. On deletion the field is changed to the row's id, which
# is unique, so it doesn't conflict with the later creation of another
# cluster with the same name.
UniqueConstraint('name', 'binary', 'race_preventer'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
cluster.create()
# Add the cluster flag to Service, ConsistencyGroup, and Volume tables.
for table_name in ('services', 'consistencygroups', 'volumes'):
table = Table(table_name, meta, autoload=True)
cluster_name = Column('cluster_name', String(255), nullable=True)
table.create_column(cluster_name)
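The race_preventer comment above is the heart of the design. A hedged
sketch of the deletion side, assuming a SQLAlchemy connection and the
reflected clusters table (soft_delete_cluster is a hypothetical helper,
not part of this migration):

def soft_delete_cluster(connection, clusters, cluster_id):
    """Soft-delete a cluster while releasing its (name, binary) pair.

    Live rows all carry race_preventer=0, so two concurrent creations
    of the same name/binary hit the unique constraint. Moving
    race_preventer to the row's unique id on deletion frees the name
    for reuse without ever allowing two live duplicates.
    """
    connection.execute(
        clusters.update()
        .where(clusters.c.id == cluster_id)
        .values(deleted=True, race_preventer=cluster_id))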


@@ -1,52 +0,0 @@
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import MetaData, String, Table, UniqueConstraint
from migrate.changeset.constraint import ForeignKeyConstraint
def upgrade(migrate_engine):
"""Add workers table."""
meta = MetaData()
meta.bind = migrate_engine
workers = Table(
'workers', meta,
# Inherited fields from CinderBase
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(), default=False),
# Workers table specific fields
Column('id', Integer, primary_key=True),
Column('resource_type', String(40), nullable=False),
Column('resource_id', String(36), nullable=False),
Column('status', String(255), nullable=False),
Column('service_id', Integer, nullable=True),
UniqueConstraint('resource_type', 'resource_id'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
workers.create()
services = Table('services', meta, autoload=True)
ForeignKeyConstraint(
columns=[workers.c.service_id],
refcolumns=[services.c.id]).create()


@@ -1,75 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import ForeignKey, MetaData, String, Table, UniqueConstraint
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# New table
group_types = Table(
'group_types',
meta,
Column('id', String(36), primary_key=True, nullable=False),
Column('name', String(255), nullable=False),
Column('description', String(255)),
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean),
Column('is_public', Boolean),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_types.create()
# New table
group_type_specs = Table(
'group_type_specs',
meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('key', String(255)),
Column('value', String(255)),
Column('group_type_id', String(36),
ForeignKey('group_types.id'),
nullable=False),
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_type_specs.create()
# New table
group_type_projects = Table(
'group_type_projects', meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('group_type_id', String(36),
ForeignKey('group_types.id')),
Column('project_id', String(length=255)),
Column('deleted', Boolean(create_constraint=True, name=None)),
UniqueConstraint('group_type_id', 'project_id', 'deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_type_projects.create()


@@ -1,96 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import ForeignKey, MetaData, String, Table, func, select
# Default number of quota groups. This should not be read from the config file.
DEFAULT_QUOTA_GROUPS = 10
CLASS_NAME = 'default'
CREATED_AT = datetime.datetime.now() # noqa
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# New table
groups = Table(
'groups',
meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean),
Column('id', String(36), primary_key=True, nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('cluster_name', String(255)),
Column('host', String(length=255)),
Column('availability_zone', String(length=255)),
Column('name', String(length=255)),
Column('description', String(length=255)),
Column('group_type_id', String(length=36)),
Column('status', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
groups.create()
# Add column to volumes table
volumes = Table('volumes', meta, autoload=True)
group_id = Column('group_id', String(36),
ForeignKey('groups.id'))
volumes.create_column(group_id)
volumes.update().values(group_id=None).execute()
# New group_volume_type_mapping table
Table('volume_types', meta, autoload=True)
grp_vt_mapping = Table(
'group_volume_type_mapping', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('volume_type_id', String(36), ForeignKey('volume_types.id'),
nullable=False),
Column('group_id', String(36),
ForeignKey('groups.id'), nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
grp_vt_mapping.create()
# Add group quota data into DB.
quota_classes = Table('quota_classes', meta, autoload=True)
rows = select([func.count()]).select_from(quota_classes).where(
quota_classes.c.resource == 'groups').execute().scalar()
# Do not add entries if there are already 'groups' entries.
if rows:
return
# Set groups
qci = quota_classes.insert()
qci.execute({'created_at': CREATED_AT,
'class_name': CLASS_NAME,
'resource': 'groups',
'hard_limit': DEFAULT_QUOTA_GROUPS,
'deleted': False, })
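The count-then-insert guard above keeps the quota seeding idempotent.
The same pattern generalized into a hypothetical helper (not part of
this commit), reusing the select and func imports already at the top of
this file and the same bound-metadata execute style:

def ensure_quota_class(quota_classes, resource, hard_limit, created_at):
    """Insert a default quota class row only if none exists yet."""
    existing = select([func.count()]).select_from(quota_classes).where(
        quota_classes.c.resource == resource).execute().scalar()
    if not existing:
        quota_classes.insert().execute({'created_at': created_at,
                                        'class_name': 'default',
                                        'resource': resource,
                                        'hard_limit': hard_limit,
                                        'deleted': False})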


@@ -1,63 +0,0 @@
# Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import ForeignKey, MetaData, String, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
groups = Table('groups', meta, autoload=True)
# New table
group_snapshots = Table(
'group_snapshots', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', String(36), primary_key=True),
Column('group_id', String(36),
ForeignKey('groups.id'),
nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('name', String(length=255)),
Column('description', String(length=255)),
Column('status', String(length=255)),
Column('group_type_id', String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_snapshots.create()
# Add group_snapshot_id column to snapshots table
snapshots = Table('snapshots', meta, autoload=True)
group_snapshot_id = Column('group_snapshot_id', String(36),
ForeignKey('group_snapshots.id'))
snapshots.create_column(group_snapshot_id)
snapshots.update().values(group_snapshot_id=None).execute()
# Add group_snapshot_id column to groups table
group_snapshot_id = Column('group_snapshot_id', String(36))
groups.create_column(group_snapshot_id)
# Add source_group_id column to groups table
source_group_id = Column('source_group_id', String(36))
groups.create_column(source_group_id)


@@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Newton backports.
# Do not use this number for new Ocata work. New work starts after
# all the placeholders.
#
# See this for more information:
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass
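The placeholder reserves this version number so stable/newton can take
backports without colliding with new Ocata migrations. Purely as a
hedged illustration (the column and change below are hypothetical, not
part of this commit), a backport would replace the no-op body on the
stable branch, keeping version numbers aligned across branches:

from sqlalchemy import Boolean, Column, MetaData, Table

def upgrade(migrate_engine):
    # Hypothetical backported change occupying this placeholder slot.
    meta = MetaData()
    meta.bind = migrate_engine
    volumes = Table('volumes', meta, autoload=True)
    # create_column comes from sqlalchemy-migrate's monkeypatching, as
    # used elsewhere in this commit.
    volumes.create_column(Column('example_flag', Boolean, default=False))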


@@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Newton backports.
# Do not use this number for new Ocata work. New work starts after
# all the placeholders.
#
# See this for more information:
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass


@@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Newton backports.
# Do not use this number for new Ocata work. New work starts after
# all the placeholders.
#
# See this for more information:
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass


@@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Newton backports.
# Do not use this number for new Ocata work. New work starts after
# all the placeholders.
#
# See this for more information:
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass


@@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Newton backports.
# Do not use this number for new Ocata work. New work starts after
# all the placeholders.
#
# See this for more information:
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass


@@ -15,6 +15,8 @@
import datetime
from oslo_config import cfg
from oslo_utils import timeutils
from sqlalchemy.dialects import mysql
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Index
from sqlalchemy import Integer, MetaData, String, Table, Text, UniqueConstraint
@@ -53,6 +55,7 @@ def define_tables(meta):
Column('replication_status', String(36), default='not-capable'),
Column('frozen', Boolean, default=False),
Column('active_backend_id', String(255)),
Column('cluster_name', String(255), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
@@ -74,6 +77,7 @@ def define_tables(meta):
Column('status', String(255)),
Column('cgsnapshot_id', String(36)),
Column('source_cgid', String(36)),
Column('cluster_name', String(255), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
@@ -97,6 +101,48 @@ def define_tables(meta):
mysql_charset='utf8'
)
groups = Table(
'groups', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean),
Column('id', String(36), primary_key=True, nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('cluster_name', String(255)),
Column('host', String(length=255)),
Column('availability_zone', String(length=255)),
Column('name', String(length=255)),
Column('description', String(length=255)),
Column('group_type_id', String(length=36)),
Column('status', String(length=255)),
Column('group_snapshot_id', String(36)),
Column('source_group_id', String(36)),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_snapshots = Table(
'group_snapshots', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', String(36), primary_key=True),
Column('group_id', String(36),
ForeignKey('groups.id'),
nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('name', String(length=255)),
Column('description', String(length=255)),
Column('status', String(length=255)),
Column('group_type_id', String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
volumes = Table(
'volumes', meta,
Column('created_at', DateTime),
@@ -135,6 +181,8 @@ def define_tables(meta):
Column('provider_id', String(255)),
Column('multiattach', Boolean),
Column('previous_status', String(255)),
Column('cluster_name', String(255), nullable=True),
Column('group_id', String(36), ForeignKey('groups.id')),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
@@ -184,6 +232,8 @@ def define_tables(meta):
ForeignKey('cgsnapshots.id')),
Column('provider_id', String(255)),
Column('provider_auth', String(255)),
Column('group_snapshot_id', String(36),
ForeignKey('group_snapshots.id')),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
@@ -490,8 +540,128 @@ def define_tables(meta):
mysql_charset='utf8'
)
messages = Table(
'messages', meta,
Column('id', String(36), primary_key=True, nullable=False),
Column('project_id', String(36), nullable=False),
Column('request_id', String(255), nullable=False),
Column('resource_type', String(36)),
Column('resource_uuid', String(255), nullable=True),
Column('event_id', String(255), nullable=False),
Column('message_level', String(255), nullable=False),
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean),
Column('expires_at', DateTime(timezone=False)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
cluster = Table(
'clusters', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(), default=False),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(255), nullable=False),
Column('binary', String(255), nullable=False),
Column('disabled', Boolean(), default=False),
Column('disabled_reason', String(255)),
Column('race_preventer', Integer, nullable=False, default=0),
# To remove potential races on creation there is a unique constraint
# on the name, binary, and race_preventer fields. race_preventer is 0
# on creation, so two live clusters with the same name fail the
# constraint. On deletion the field is changed to the row's id, which
# is unique, so it doesn't conflict with the later creation of another
# cluster with the same name.
UniqueConstraint('name', 'binary', 'race_preventer'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
workers = Table(
'workers', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(), default=False),
Column('id', Integer, primary_key=True),
Column('resource_type', String(40), nullable=False),
Column('resource_id', String(36), nullable=False),
Column('status', String(255), nullable=False),
Column('service_id', Integer, ForeignKey('services.id'),
nullable=True),
UniqueConstraint('resource_type', 'resource_id'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_types = Table(
'group_types', meta,
Column('id', String(36), primary_key=True, nullable=False),
Column('name', String(255), nullable=False),
Column('description', String(255)),
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean),
Column('is_public', Boolean),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_type_specs = Table(
'group_type_specs', meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('key', String(255)),
Column('value', String(255)),
Column('group_type_id', String(36),
ForeignKey('group_types.id'),
nullable=False),
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_type_projects = Table(
'group_type_projects', meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('group_type_id', String(36),
ForeignKey('group_types.id')),
Column('project_id', String(length=255)),
Column('deleted', Boolean(create_constraint=True, name=None)),
UniqueConstraint('group_type_id', 'project_id', 'deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
grp_vt_mapping = Table(
'group_volume_type_mapping', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('volume_type_id', String(36), ForeignKey('volume_types.id'),
nullable=False),
Column('group_id', String(36),
ForeignKey('groups.id'), nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
return [consistencygroups,
cgsnapshots,
groups,
group_snapshots,
volumes,
volume_attachment,
snapshots,
@@ -512,7 +682,14 @@ def define_tables(meta):
encryption,
volume_admin_metadata,
initiator_data,
image_volume_cache]
image_volume_cache,
messages,
cluster,
workers,
group_types,
group_type_specs,
group_type_projects,
grp_vt_mapping]
def upgrade(migrate_engine):
@@ -592,3 +769,35 @@ def upgrade(migrate_engine):
'resource': 'per_volume_gigabytes',
'hard_limit': -1,
'deleted': False, })
qci.execute({'created_at': CREATED_AT,
'class_name': CLASS_NAME,
'resource': 'groups',
'hard_limit': CONF.quota_groups,
'deleted': False, })
workers = Table('workers', meta, autoload=True)
# This is only necessary for MySQL, and since the table is not in use this
# will only be a schema update.
if migrate_engine.name.startswith('mysql'):
try:
workers.c.updated_at.alter(mysql.DATETIME(fsp=6))
except Exception:
# MySQL 5.5 and earlier don't support sub-second resolution, so we
# may have cleanup races in Active-Active configurations; that's
# why upgrading MySQL is recommended in that case.
# Cinder itself still works with 5.5, so this is not treated as an
# error.
pass
# TODO(geguileo): Once we remove support for MySQL 5.5 we have to create
# an upgrade migration to remove this row.
# Set workers table sub-second support sentinel
wi = workers.insert()
now = timeutils.utcnow().replace(microsecond=123)
wi.execute({'created_at': now,
'updated_at': now,
'deleted': False,
'resource_type': 'SENTINEL',
'resource_id': 'SUB-SECOND',
'status': 'OK'})
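The SENTINEL row written above records whether updated_at keeps
microseconds. A hedged sketch of how that could be checked later, using
the same bound-metadata style as the migration (the function is
hypothetical; Cinder's real detection code may differ):

def has_subsecond_precision(workers):
    """Return True if updated_at kept the microseconds we stored.

    The sentinel row is written with microsecond=123; MySQL 5.5 and
    earlier truncate it to whole seconds.
    """
    row = workers.select().where(
        workers.c.resource_type == 'SENTINEL').execute().first()
    return row is not None and row.updated_at.microsecond != 0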


@@ -1,55 +0,0 @@
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
from sqlalchemy.dialects import mysql
from sqlalchemy import MetaData, Table
def upgrade(migrate_engine):
"""Add microseconds precision on updated_at field in MySQL databases.
PostgreSQL, SQLite, and MSSQL have sub-second precision by default, but
MySQL defaults to second precision in DateTime fields, which creates
problems for the resource cleanup mechanism.
"""
meta = MetaData()
meta.bind = migrate_engine
workers = Table('workers', meta, autoload=True)
# This is only necessary for MySQL, and since the table is not in use this
# will only be a schema update.
if migrate_engine.name.startswith('mysql'):
try:
workers.c.updated_at.alter(mysql.DATETIME(fsp=6))
except Exception:
# MySQL 5.5 and earlier don't support sub-second resolution, so we
# may have cleanup races in Active-Active configurations; that's
# why upgrading MySQL is recommended in that case.
# Cinder itself still works with 5.5, so this is not treated as an
# error.
pass
# TODO(geguileo): Once we remove support for MySQL 5.5 we have to create
# an upgrade migration to remove this row.
# Set workers table sub-second support sentinel
wi = workers.insert()
now = timeutils.utcnow().replace(microsecond=123)
wi.execute({'created_at': now,
'updated_at': now,
'deleted': False,
'resource_type': 'SENTINEL',
'resource_id': 'SUB-SECOND',
'status': 'OK'})


@@ -20,7 +20,6 @@ if possible.
"""
import os
import uuid
import fixtures
from migrate.versioning import api as migration_api
@@ -30,7 +29,6 @@ from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils as db_utils
import sqlalchemy
from sqlalchemy.engine import reflection
from sqlalchemy import func, select
from cinder.db import migration
import cinder.db.sqlalchemy.migrate_repo
@@ -98,725 +96,23 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
# manner is provided in Cinder's developer documentation.
# Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE WITHOUT CARE
exceptions = [
# NOTE(dulek): 62 alters the column type from boolean to integer to
# fix the bug 1518363. If we've followed the guidelines for live
# schema upgrades we would end up either waiting 3 releases to fix
# a simple bug or trigger a rebuild index operation in migration
# (because constraint was impossible to delete without deleting
# other foreign key constraints). Either way it's harsh... We've
# decided to go with alter to minimise upgrade impact. The only
# consequence for deployments running recent MySQL is inability
# to perform volume-type-access modifications while running this
# migration.
62,
# NOTE(dulek): 66 sets reservations.usage_id to nullable. This is
# NOTE(ameade): 87 sets messages.request_id to nullable. This is
# 100% backward compatible and according to MySQL docs such ALTER
# is performed with the same restrictions as column addition, which
# we of course allow.
66,
# NOTE(dulek): 73 drops tables and columns we've stopped using a
# release ago.
73,
# NOTE(ameade): 87 sets messages.request_id to nullable. This
# should be safe for the same reason as migration 66.
87,
# NOTE: 104 modifies the size of messages.project_id to 255.
# This should be safe for the same reason as migration 87.
104,
]
# NOTE(dulek): We only started requiring things be additive in
# Mitaka, so ignore all migrations before that point.
MITAKA_START = 61
if version >= MITAKA_START and version not in exceptions:
if version not in exceptions:
banned = ['Table', 'Column']
else:
banned = None
with MigrationsMixin.BannedDBSchemaOperations(banned):
super(MigrationsMixin, self).migrate_up(version, with_data)
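BannedDBSchemaOperations itself is defined outside this hunk; the sketch
below shows one way such a fixture can work, by monkeypatching the
drop/alter methods that SQLAlchemy and sqlalchemy-migrate provide. The
details are assumptions, not the verbatim class from the test suite:

import fixtures

class BannedDBSchemaOperations(fixtures.Fixture):
    """Fail loudly if a banned schema resource is dropped or altered."""

    def __init__(self, banned_resources=None):
        super(BannedDBSchemaOperations, self).__init__()
        self._banned_resources = banned_resources or []

    @staticmethod
    def _explode(resource, op):
        raise Exception('%s.%s() is not allowed in a live upgrade' %
                        (resource, op))

    def setUp(self):
        super(BannedDBSchemaOperations, self).setUp()

        def banned(resource, op):
            # Factory binds 'resource' eagerly, avoiding the usual
            # late-binding closure pitfall inside the loop below.
            def _fail(*args, **kwargs):
                self._explode(resource, op)
            return _fail

        for thing in self._banned_resources:
            self.useFixture(fixtures.MonkeyPatch(
                'sqlalchemy.%s.drop' % thing, banned(thing, 'drop')))
            self.useFixture(fixtures.MonkeyPatch(
                'sqlalchemy.%s.alter' % thing, banned(thing, 'alter')))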
def _pre_upgrade_004(self, engine):
"""Change volume types to UUID """
data = {
'volumes': [{'id': str(uuid.uuid4()), 'host': 'test1',
'volume_type_id': 1},
{'id': str(uuid.uuid4()), 'host': 'test2',
'volume_type_id': 1},
{'id': str(uuid.uuid4()), 'host': 'test3',
'volume_type_id': 3},
],
'volume_types': [{'name': 'vtype1'},
{'name': 'vtype2'},
{'name': 'vtype3'},
],
'volume_type_extra_specs': [{'volume_type_id': 1,
'key': 'v1',
'value': 'hotep',
},
{'volume_type_id': 1,
'key': 'v2',
'value': 'bending rodrigez',
},
{'volume_type_id': 2,
'key': 'v3',
'value': 'bending rodrigez',
},
]}
volume_types = db_utils.get_table(engine, 'volume_types')
for vtype in data['volume_types']:
r = volume_types.insert().values(vtype).execute()
vtype['id'] = r.inserted_primary_key[0]
volume_type_es = db_utils.get_table(engine, 'volume_type_extra_specs')
for vtes in data['volume_type_extra_specs']:
r = volume_type_es.insert().values(vtes).execute()
vtes['id'] = r.inserted_primary_key[0]
volumes = db_utils.get_table(engine, 'volumes')
for vol in data['volumes']:
r = volumes.insert().values(vol).execute()
vol['id'] = r.inserted_primary_key[0]
return data
def _check_004(self, engine, data):
volumes = db_utils.get_table(engine, 'volumes')
v1 = volumes.select(volumes.c.id ==
data['volumes'][0]['id']
).execute().first()
v2 = volumes.select(volumes.c.id ==
data['volumes'][1]['id']
).execute().first()
v3 = volumes.select(volumes.c.id ==
data['volumes'][2]['id']
).execute().first()
volume_types = db_utils.get_table(engine, 'volume_types')
vt1 = volume_types.select(volume_types.c.name ==
data['volume_types'][0]['name']
).execute().first()
vt2 = volume_types.select(volume_types.c.name ==
data['volume_types'][1]['name']
).execute().first()
vt3 = volume_types.select(volume_types.c.name ==
data['volume_types'][2]['name']
).execute().first()
vtes = db_utils.get_table(engine, 'volume_type_extra_specs')
vtes1 = vtes.select(vtes.c.key ==
data['volume_type_extra_specs'][0]['key']
).execute().first()
vtes2 = vtes.select(vtes.c.key ==
data['volume_type_extra_specs'][1]['key']
).execute().first()
vtes3 = vtes.select(vtes.c.key ==
data['volume_type_extra_specs'][2]['key']
).execute().first()
self.assertEqual(v1['volume_type_id'], vt1['id'])
self.assertEqual(v2['volume_type_id'], vt1['id'])
self.assertEqual(v3['volume_type_id'], vt3['id'])
self.assertEqual(vtes1['volume_type_id'], vt1['id'])
self.assertEqual(vtes2['volume_type_id'], vt1['id'])
self.assertEqual(vtes3['volume_type_id'], vt2['id'])
def _check_005(self, engine, data):
"""Test that adding source_volid column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.source_volid.type,
self.VARCHAR_TYPE)
def _check_006(self, engine, data):
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.provider_location.type,
self.VARCHAR_TYPE)
def _check_007(self, engine, data):
snapshots = db_utils.get_table(engine, 'snapshots')
fkey, = snapshots.c.volume_id.foreign_keys
self.assertIsNotNone(fkey)
def _pre_upgrade_008(self, engine):
self.assertFalse(engine.dialect.has_table(engine.connect(),
"backups"))
def _check_008(self, engine, data):
"""Test that adding and removing the backups table works correctly."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"backups"))
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(backups.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(backups.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(backups.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(backups.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.volume_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.host.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.availability_zone.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.display_name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.display_description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.container.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.status.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.fail_reason.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.service_metadata.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.service.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.size.type,
self.INTEGER_TYPE)
self.assertIsInstance(backups.c.object_count.type,
self.INTEGER_TYPE)
def _check_009(self, engine, data):
"""Test adding snapshot_metadata table works correctly."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"snapshot_metadata"))
snapshot_metadata = db_utils.get_table(engine, 'snapshot_metadata')
self.assertIsInstance(snapshot_metadata.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(snapshot_metadata.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(snapshot_metadata.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(snapshot_metadata.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(snapshot_metadata.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(snapshot_metadata.c.snapshot_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(snapshot_metadata.c.key.type,
self.VARCHAR_TYPE)
self.assertIsInstance(snapshot_metadata.c.value.type,
self.VARCHAR_TYPE)
def _check_010(self, engine, data):
"""Test adding transfers table works correctly."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"transfers"))
transfers = db_utils.get_table(engine, 'transfers')
self.assertIsInstance(transfers.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(transfers.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(transfers.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(transfers.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(transfers.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(transfers.c.volume_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(transfers.c.display_name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(transfers.c.salt.type,
self.VARCHAR_TYPE)
self.assertIsInstance(transfers.c.crypt_hash.type,
self.VARCHAR_TYPE)
self.assertIsInstance(transfers.c.expires_at.type,
self.TIME_TYPE)
def _check_011(self, engine, data):
"""Test adding transfers table works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIn('bootable', volumes.c)
self.assertIsInstance(volumes.c.bootable.type,
self.BOOL_TYPE)
def _check_012(self, engine, data):
"""Test that adding attached_host column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.attached_host.type,
self.VARCHAR_TYPE)
def _check_013(self, engine, data):
"""Test that adding provider_geometry column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.provider_geometry.type,
self.VARCHAR_TYPE)
def _check_014(self, engine, data):
"""Test that adding _name_id column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c._name_id.type,
self.VARCHAR_TYPE)
def _check_015(self, engine, data):
"""Test removing migrations table works correctly."""
self.assertFalse(engine.dialect.has_table(engine.connect(),
"migrations"))
def _check_016(self, engine, data):
"""Test that dropping xen storage manager tables works correctly."""
self.assertFalse(engine.dialect.has_table(engine.connect(),
'sm_flavors'))
self.assertFalse(engine.dialect.has_table(engine.connect(),
'sm_backend_config'))
self.assertFalse(engine.dialect.has_table(engine.connect(),
'sm_volume'))
def _check_017(self, engine, data):
"""Test that added encryption information works correctly."""
# encryption key UUID
volumes = db_utils.get_table(engine, 'volumes')
self.assertIn('encryption_key_id', volumes.c)
self.assertIsInstance(volumes.c.encryption_key_id.type,
self.VARCHAR_TYPE)
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIn('encryption_key_id', snapshots.c)
self.assertIsInstance(snapshots.c.encryption_key_id.type,
self.VARCHAR_TYPE)
self.assertIn('volume_type_id', snapshots.c)
self.assertIsInstance(snapshots.c.volume_type_id.type,
self.VARCHAR_TYPE)
# encryption types table
encryption = db_utils.get_table(engine, 'encryption')
self.assertIsInstance(encryption.c.volume_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(encryption.c.cipher.type,
self.VARCHAR_TYPE)
self.assertIsInstance(encryption.c.key_size.type,
self.INTEGER_TYPE)
self.assertIsInstance(encryption.c.provider.type,
self.VARCHAR_TYPE)
def _check_018(self, engine, data):
"""Test that added qos_specs table works correctly."""
self.assertTrue(engine.dialect.has_table(
engine.connect(), "quality_of_service_specs"))
qos_specs = db_utils.get_table(engine, 'quality_of_service_specs')
self.assertIsInstance(qos_specs.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(qos_specs.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(qos_specs.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(qos_specs.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(qos_specs.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(qos_specs.c.specs_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(qos_specs.c.key.type,
self.VARCHAR_TYPE)
self.assertIsInstance(qos_specs.c.value.type,
self.VARCHAR_TYPE)
def _check_019(self, engine, data):
"""Test that adding migration_status column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.migration_status.type,
self.VARCHAR_TYPE)
def _check_020(self, engine, data):
"""Test adding volume_admin_metadata table works correctly."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"volume_admin_metadata"))
volume_admin_metadata = db_utils.get_table(engine,
'volume_admin_metadata')
self.assertIsInstance(volume_admin_metadata.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_admin_metadata.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_admin_metadata.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_admin_metadata.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(volume_admin_metadata.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(volume_admin_metadata.c.volume_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(volume_admin_metadata.c.key.type,
self.VARCHAR_TYPE)
self.assertIsInstance(volume_admin_metadata.c.value.type,
self.VARCHAR_TYPE)
def _verify_quota_defaults(self, engine):
quota_class_metadata = db_utils.get_table(engine, 'quota_classes')
num_defaults = quota_class_metadata.count().\
where(quota_class_metadata.c.class_name == 'default').\
execute().scalar()
self.assertEqual(3, num_defaults)
def _check_021(self, engine, data):
"""Test adding default data for quota classes works correctly."""
self._verify_quota_defaults(engine)
def _check_022(self, engine, data):
"""Test that adding disabled_reason column works correctly."""
services = db_utils.get_table(engine, 'services')
self.assertIsInstance(services.c.disabled_reason.type,
self.VARCHAR_TYPE)
def _check_023(self, engine, data):
"""Test that adding reservations index works correctly."""
reservations = db_utils.get_table(engine, 'reservations')
index_columns = []
for idx in reservations.indexes:
if idx.name == 'reservations_deleted_expire_idx':
index_columns = idx.columns.keys()
break
self.assertEqual(sorted(['deleted', 'expire']),
sorted(index_columns))
def _check_024(self, engine, data):
"""Test adding replication columns to volume table."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.replication_status.type,
self.VARCHAR_TYPE)
self.assertIsInstance(volumes.c.replication_extended_status.type,
self.VARCHAR_TYPE)
self.assertIsInstance(volumes.c.replication_driver_data.type,
self.VARCHAR_TYPE)
def _check_025(self, engine, data):
"""Test adding table and columns for consistencygroups."""
# Test consistencygroup_id is in Table volumes
metadata = sqlalchemy.MetaData()
volumes = self.get_table_ref(engine, 'volumes', metadata)
self.assertIsInstance(volumes.c.consistencygroup_id.type,
self.VARCHAR_TYPE)
# Test cgsnapshot_id is in Table snapshots
snapshots = self.get_table_ref(engine, 'snapshots', metadata)
self.assertIsInstance(snapshots.c.cgsnapshot_id.type,
self.VARCHAR_TYPE)
# Test Table consistencygroups exists
self.assertTrue(engine.dialect.has_table(engine.connect(),
"consistencygroups"))
consistencygroups = self.get_table_ref(engine,
'consistencygroups',
metadata)
self.assertIsInstance(consistencygroups.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(consistencygroups.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(consistencygroups.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(consistencygroups.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(consistencygroups.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.host.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.availability_zone.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.volume_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.status.type,
self.VARCHAR_TYPE)
# Test Table cgsnapshots exists
self.assertTrue(engine.dialect.has_table(engine.connect(),
"cgsnapshots"))
cgsnapshots = self.get_table_ref(engine,
'cgsnapshots',
metadata)
self.assertIsInstance(cgsnapshots.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(cgsnapshots.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(cgsnapshots.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(cgsnapshots.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(cgsnapshots.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.consistencygroup_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.status.type,
self.VARCHAR_TYPE)
# Verify foreign keys are created
fkey, = volumes.c.consistencygroup_id.foreign_keys
self.assertEqual(consistencygroups.c.id, fkey.column)
self.assertEqual(1, len(volumes.foreign_keys))
fkey, = snapshots.c.cgsnapshot_id.foreign_keys
self.assertEqual(cgsnapshots.c.id, fkey.column)
fkey, = snapshots.c.volume_id.foreign_keys
self.assertEqual(volumes.c.id, fkey.column)
# 2 foreign keys in Table snapshots
self.assertEqual(2, len(snapshots.foreign_keys))
def _pre_upgrade_026(self, engine):
"""Test adding default data for consistencygroups quota class."""
quota_class_metadata = db_utils.get_table(engine, 'quota_classes')
num_defaults = quota_class_metadata.count().\
where(quota_class_metadata.c.class_name == 'default').\
execute().scalar()
self.assertEqual(3, num_defaults)
def _check_026(self, engine, data):
quota_class_metadata = db_utils.get_table(engine, 'quota_classes')
num_defaults = quota_class_metadata.count().\
where(quota_class_metadata.c.class_name == 'default').\
execute().scalar()
self.assertEqual(4, num_defaults)
def _check_032(self, engine, data):
"""Test adding volume_type_projects table works correctly."""
volume_type_projects = db_utils.get_table(engine,
'volume_type_projects')
self.assertIsInstance(volume_type_projects.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_type_projects.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_type_projects.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_type_projects.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(volume_type_projects.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(volume_type_projects.c.volume_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(volume_type_projects.c.project_id.type,
self.VARCHAR_TYPE)
volume_types = db_utils.get_table(engine, 'volume_types')
self.assertIsInstance(volume_types.c.is_public.type,
self.BOOL_TYPE)
def _check_033(self, engine, data):
"""Test adding encryption_id column to encryption table."""
encryptions = db_utils.get_table(engine, 'encryption')
self.assertIsInstance(encryptions.c.encryption_id.type,
self.VARCHAR_TYPE)
def _check_034(self, engine, data):
"""Test adding description columns to volume_types table."""
volume_types = db_utils.get_table(engine, 'volume_types')
self.assertIsInstance(volume_types.c.description.type,
self.VARCHAR_TYPE)
def _check_035(self, engine, data):
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.provider_id.type,
self.VARCHAR_TYPE)
def _check_036(self, engine, data):
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.provider_id.type,
self.VARCHAR_TYPE)
def _check_037(self, engine, data):
consistencygroups = db_utils.get_table(engine, 'consistencygroups')
self.assertIsInstance(consistencygroups.c.cgsnapshot_id.type,
self.VARCHAR_TYPE)
def _check_038(self, engine, data):
"""Test adding and removing driver_initiator_data table."""
has_table = engine.dialect.has_table(engine.connect(),
"driver_initiator_data")
self.assertTrue(has_table)
private_data = db_utils.get_table(
engine,
'driver_initiator_data'
)
self.assertIsInstance(private_data.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(private_data.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(private_data.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(private_data.c.initiator.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.namespace.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.key.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.value.type,
self.VARCHAR_TYPE)
def _check_039(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.parent_id.type,
self.VARCHAR_TYPE)
def _check_040(self, engine, data):
volumes = db_utils.get_table(engine, 'volumes')
self.assertNotIn('instance_uuid', volumes.c)
self.assertNotIn('attached_host', volumes.c)
self.assertNotIn('attach_time', volumes.c)
self.assertNotIn('mountpoint', volumes.c)
self.assertIsInstance(volumes.c.multiattach.type,
self.BOOL_TYPE)
attachments = db_utils.get_table(engine, 'volume_attachment')
self.assertIsInstance(attachments.c.attach_mode.type,
self.VARCHAR_TYPE)
self.assertIsInstance(attachments.c.instance_uuid.type,
self.VARCHAR_TYPE)
self.assertIsInstance(attachments.c.attached_host.type,
self.VARCHAR_TYPE)
self.assertIsInstance(attachments.c.mountpoint.type,
self.VARCHAR_TYPE)
self.assertIsInstance(attachments.c.attach_status.type,
self.VARCHAR_TYPE)
def _check_041(self, engine, data):
"""Test that adding modified_at column works correctly."""
services = db_utils.get_table(engine, 'services')
self.assertIsInstance(services.c.modified_at.type,
self.TIME_TYPE)
def _check_048(self, engine, data):
quotas = db_utils.get_table(engine, 'quotas')
self.assertIsInstance(quotas.c.allocated.type,
self.INTEGER_TYPE)
def _check_049(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.temp_volume_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.temp_snapshot_id.type,
self.VARCHAR_TYPE)
def _check_050(self, engine, data):
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.previous_status.type,
self.VARCHAR_TYPE)
def _check_051(self, engine, data):
consistencygroups = db_utils.get_table(engine, 'consistencygroups')
self.assertIsInstance(consistencygroups.c.source_cgid.type,
self.VARCHAR_TYPE)
def _check_052(self, engine, data):
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.provider_auth.type,
self.VARCHAR_TYPE)
def _check_053(self, engine, data):
services = db_utils.get_table(engine, 'services')
self.assertIsInstance(services.c.rpc_current_version.type,
self.VARCHAR_TYPE)
self.assertIsInstance(services.c.rpc_available_version.type,
self.VARCHAR_TYPE)
self.assertIsInstance(services.c.object_current_version.type,
self.VARCHAR_TYPE)
self.assertIsInstance(services.c.object_available_version.type,
self.VARCHAR_TYPE)
def _check_054(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.num_dependent_backups.type,
self.INTEGER_TYPE)
def _check_055(self, engine, data):
"""Test adding image_volume_cache_entries table."""
has_table = engine.dialect.has_table(engine.connect(),
"image_volume_cache_entries")
self.assertTrue(has_table)
private_data = db_utils.get_table(
engine,
'image_volume_cache_entries'
)
self.assertIsInstance(private_data.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(private_data.c.host.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.image_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.image_updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(private_data.c.volume_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.size.type,
self.INTEGER_TYPE)
self.assertIsInstance(private_data.c.last_used.type,
self.TIME_TYPE)
def _check_061(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.snapshot_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.data_timestamp.type,
self.TIME_TYPE)
def _check_062(self, engine, data):
volume_type_projects = db_utils.get_table(engine,
'volume_type_projects')
self.assertIsInstance(volume_type_projects.c.id.type,
self.INTEGER_TYPE)
def _check_064(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.restore_volume_id.type,
self.VARCHAR_TYPE)
def _check_065(self, engine, data):
services = db_utils.get_table(engine, 'services')
self.assertIsInstance(services.c.replication_status.type,
self.VARCHAR_TYPE)
self.assertIsInstance(services.c.frozen.type,
self.BOOL_TYPE)
self.assertIsInstance(services.c.active_backend_id.type,
self.VARCHAR_TYPE)
def _check_066(self, engine, data):
reservations = db_utils.get_table(engine, 'reservations')
self.assertIsInstance(reservations.c.allocated_id.type,
self.INTEGER_TYPE)
def __check_cinderbase_fields(self, columns):
"""Check fields inherited from CinderBase ORM class."""
self.assertIsInstance(columns.created_at.type, self.TIME_TYPE)
@@ -824,236 +120,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(columns.deleted_at.type, self.TIME_TYPE)
self.assertIsInstance(columns.deleted.type, self.BOOL_TYPE)
def _check_067(self, engine, data):
iscsi_targets = db_utils.get_table(engine, 'iscsi_targets')
fkey, = iscsi_targets.c.volume_id.foreign_keys
self.assertIsNotNone(fkey)
def _check_074(self, engine, data):
"""Test adding message table."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"messages"))
messages = db_utils.get_table(engine, 'messages')
self.assertIsInstance(messages.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(messages.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(messages.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(messages.c.message_level.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.request_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.resource_uuid.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.event_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.resource_type.type,
self.VARCHAR_TYPE)
def _check_075(self, engine, data):
"""Test adding cluster table and cluster_id fields."""
self.assertTrue(engine.dialect.has_table(engine.connect(), 'clusters'))
clusters = db_utils.get_table(engine, 'clusters')
columns = clusters.c
self.__check_cinderbase_fields(columns)
# Cluster specific fields
self.assertIsInstance(columns.id.type, self.INTEGER_TYPE)
self.assertIsInstance(columns.name.type, self.VARCHAR_TYPE)
self.assertIsInstance(columns.binary.type, self.VARCHAR_TYPE)
self.assertIsInstance(columns.disabled.type, self.BOOL_TYPE)
self.assertIsInstance(columns.disabled_reason.type, self.VARCHAR_TYPE)
# Check that we have added cluster_name field to all required tables
for table_name in ('services', 'consistencygroups', 'volumes'):
table = db_utils.get_table(engine, table_name)
self.assertIsInstance(table.c.cluster_name.type,
self.VARCHAR_TYPE)
def _check_076(self, engine, data):
workers = db_utils.get_table(engine, 'workers')
columns = workers.c
self.__check_cinderbase_fields(columns)
# Workers specific fields
self.assertIsInstance(columns.id.type, self.INTEGER_TYPE)
self.assertIsInstance(columns.resource_type.type, self.VARCHAR_TYPE)
self.assertIsInstance(columns.resource_id.type, self.VARCHAR_TYPE)
self.assertIsInstance(columns.status.type, self.VARCHAR_TYPE)
self.assertIsInstance(columns.service_id.type, self.INTEGER_TYPE)
def _check_077(self, engine, data):
"""Test adding group types and specs tables."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_types"))
group_types = db_utils.get_table(engine, 'group_types')
self.assertIsInstance(group_types.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_types.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_types.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_types.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_types.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_types.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_types.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(group_types.c.is_public.type,
self.BOOL_TYPE)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_type_specs"))
group_specs = db_utils.get_table(engine, 'group_type_specs')
self.assertIsInstance(group_specs.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(group_specs.c.key.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_specs.c.value.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_specs.c.group_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_specs.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_specs.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_specs.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_specs.c.deleted.type,
self.BOOL_TYPE)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_type_projects"))
type_projects = db_utils.get_table(engine, 'group_type_projects')
self.assertIsInstance(type_projects.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(type_projects.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(type_projects.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(type_projects.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(type_projects.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(type_projects.c.group_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(type_projects.c.project_id.type,
self.VARCHAR_TYPE)
def _check_078(self, engine, data):
"""Test adding groups tables."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"groups"))
groups = db_utils.get_table(engine, 'groups')
self.assertIsInstance(groups.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(groups.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(groups.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(groups.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(groups.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.host.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.availability_zone.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.group_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.status.type,
self.VARCHAR_TYPE)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_volume_type_mapping"))
mapping = db_utils.get_table(engine, 'group_volume_type_mapping')
self.assertIsInstance(mapping.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(mapping.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(mapping.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(mapping.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(mapping.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(mapping.c.volume_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(mapping.c.group_id.type,
self.VARCHAR_TYPE)
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.group_id.type,
self.VARCHAR_TYPE)
quota_classes = db_utils.get_table(engine, 'quota_classes')
rows = select([func.count()]).select_from(
quota_classes).where(quota_classes.c.resource == 'groups').\
execute().scalar()
self.assertEqual(1, rows)
def _check_079(self, engine, data):
"""Test adding group_snapshots tables."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_snapshots"))
group_snapshots = db_utils.get_table(engine, 'group_snapshots')
self.assertIsInstance(group_snapshots.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_snapshots.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_snapshots.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_snapshots.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(group_snapshots.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.group_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.group_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.status.type,
self.VARCHAR_TYPE)
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.group_snapshot_id.type,
self.VARCHAR_TYPE)
groups = db_utils.get_table(engine, 'groups')
self.assertIsInstance(groups.c.group_snapshot_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.source_group_id.type,
self.VARCHAR_TYPE)
def _check_086(self, engine, data):
"""Test inserting default cgsnapshot group type."""
self.assertTrue(engine.dialect.has_table(engine.connect(),


@@ -0,0 +1,6 @@
---
upgrade:
- |
The Cinder database can now only be upgraded from the Newton release or
later. To upgrade from a version prior to Newton, you must first upgrade
to at least Newton, then to Queens or later.