squash migrations - kilo

Only support DB schema upgrades from Kilo onwards

A few notes for reviewers:

* 052 and 063 were not ported over since they negated each other (052 added the region 'url' column and 063 dropped it)
* 066 was not ported over since it just changed existing data
* 067 was not ported over since it was clean-up from 062
* removed the downgrade block from 067

Change-Id: I07539920eed15290b6036906e34805a0f175a07a
Closes-Bug: 1541092
Steve Martinelli 2016-02-04 02:24:12 -05:00
parent 6a4926b863
commit f5c64718a1
27 changed files with 116 additions and 946 deletions

View File

@ -13,5 +13,5 @@
# License for the specific language governing permissions and limitations
# under the License.
DB_INIT_VERSION = 43
# NOTE(stevemar): This has to be one less than the current lowest db migration
DB_INIT_VERSION = 66
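For context, not part of the diff: DB_INIT_VERSION is what the sqlalchemy-migrate helpers in oslo.db stamp a brand-new database with before applying every script numbered init_version + 1 and higher, which is why the value must be exactly one less than the lowest migration that still ships (067). A minimal sketch of that interaction, assuming the standard oslo.db helper and a placeholder repo_path:

# Hedged sketch: how DB_INIT_VERSION feeds the migration helper. repo_path is
# a placeholder for keystone's migrate_repo directory.
from oslo_db.sqlalchemy import migration


def sync_to_latest(engine, repo_path):
    # A fresh database is stamped at version 66, then 067_kilo.py and every
    # later script are applied in order.
    return migration.db_sync(engine, repo_path, version=None, init_version=66)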

View File

@ -1,21 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Icehouse backports. Do not use this number for new
# Juno work. New Juno work starts after all the placeholders.
#
# See blueprint reserved-db-migrations-icehouse and the related discussion:
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    pass

View File

@ -1,21 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Icehouse backports. Do not use this number for new
# Juno work. New Juno work starts after all the placeholders.
#
# See blueprint reserved-db-migrations-icehouse and the related discussion:
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    pass

View File

@ -1,21 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Icehouse backports. Do not use this number for new
# Juno work. New Juno work starts after all the placeholders.
#
# See blueprint reserved-db-migrations-icehouse and the related discussion:
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    pass

View File

@ -1,21 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Icehouse backports. Do not use this number for new
# Juno work. New Juno work starts after all the placeholders.
#
# See blueprint reserved-db-migrations-icehouse and the related discussion:
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    pass

View File

@ -1,21 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Icehouse backports. Do not use this number for new
# Juno work. New Juno work starts after all the placeholders.
#
# See blueprint reserved-db-migrations-icehouse and the related discussion:
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
    pass

View File

@ -1,43 +0,0 @@
# Copyright 2014 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa


def upgrade(migrate_engine):
    if migrate_engine.name == 'mysql':
        meta = sa.MetaData(bind=migrate_engine)
        endpoint = sa.Table('endpoint', meta, autoload=True)

        # NOTE(i159): MySQL requires indexes on referencing columns and
        # creates those indexes automatically, with names that vary between
        # MySQL versions. Make the naming consistent by recreating the index
        # under a fixed name.
        if any(i for i in endpoint.indexes if
               list(i.columns.keys()) == ['service_id']
               and i.name != 'service_id'):
            # NOTE(i159): this re-creates the index under the new name, which
            # MySQL treats as a rename.
            sa.Index('service_id', endpoint.c.service_id).create()

        user_group_membership = sa.Table('user_group_membership',
                                         meta, autoload=True)
        if any(i for i in user_group_membership.indexes if
               list(i.columns.keys()) == ['group_id']
               and i.name != 'group_id'):
            sa.Index('group_id', user_group_membership.c.group_id).create()

View File

@ -1,41 +0,0 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql

from keystone.identity.mapping_backends import mapping

MAPPING_TABLE = 'id_mapping'


def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine

    mapping_table = sql.Table(
        MAPPING_TABLE,
        meta,
        sql.Column('public_id', sql.String(64), primary_key=True),
        sql.Column('domain_id', sql.String(64), nullable=False),
        sql.Column('local_id', sql.String(64), nullable=False),
        sql.Column('entity_type', sql.Enum(
            mapping.EntityType.USER,
            mapping.EntityType.GROUP,
            name='entity_type'),
            nullable=False),
        sql.UniqueConstraint('domain_id', 'local_id', 'entity_type'),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
    mapping_table.create(migrate_engine, checkfirst=True)

View File

@ -1,27 +0,0 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql

_REGION_TABLE_NAME = 'region'


def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine

    region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
    url_column = sql.Column('url', sql.String(255), nullable=True)
    region_table.create_column(url_column)

View File

@ -1,90 +0,0 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Migrated the endpoint 'region' column to 'region_id.
In addition to the rename, the new column is made a foreign key to the
respective 'region' in the region table, ensuring that we auto-create
any regions that are missing. Further, since the old region column
was 255 chars, and the id column in the region table is 64 chars, the size
of the id column in the region table is increased to match.
To Upgrade:
Region Table
Increase the size of the if column in the region table
Endpoint Table
a. Add the endpoint region_id column, that is a foreign key to the region table
b. For each endpoint
i. Ensure there is matching region in region table, and if not, create it
ii. Assign the id to the region_id column
c. Remove the column region
"""
import migrate
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
def _migrate_to_region_id(migrate_engine, region_table, endpoint_table):
endpoints = list(endpoint_table.select().execute())
for endpoint in endpoints:
if endpoint.region is None:
continue
region = list(region_table.select(
whereclause=region_table.c.id == endpoint.region).execute())
if len(region) == 1:
region_id = region[0].id
else:
region_id = endpoint.region
region = {'id': region_id,
'description': '',
'extra': '{}'}
session = sessionmaker(bind=migrate_engine)()
region_table.insert(region).execute()
session.commit()
new_values = {'region_id': region_id}
f = endpoint_table.c.id == endpoint.id
update = endpoint_table.update().where(f).values(new_values)
migrate_engine.execute(update)
migrate.ForeignKeyConstraint(
columns=[endpoint_table.c.region_id],
refcolumns=[region_table.c.id],
name='fk_endpoint_region_id').create()
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
region_table = sql.Table('region', meta, autoload=True)
region_table.c.id.alter(type=sql.String(length=255))
region_table.c.parent_region_id.alter(type=sql.String(length=255))
endpoint_table = sql.Table('endpoint', meta, autoload=True)
region_id_column = sql.Column('region_id',
sql.String(length=255), nullable=True)
region_id_column.create(endpoint_table)
_migrate_to_region_id(migrate_engine, region_table, endpoint_table)
endpoint_table.c.region.drop()

View File

@ -1,27 +0,0 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql

ASSIGNMENT_TABLE = 'assignment'


def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine

    assignment = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
    idx = sql.Index('ix_actor_id', assignment.c.actor_id)
    idx.create(migrate_engine)

View File

@ -1,25 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add indexes to `user_id` and `trust_id` columns for the `token` table."""
import sqlalchemy as sql
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
token = sql.Table('token', meta, autoload=True)
sql.Index('ix_token_user_id', token.c.user_id).create()
sql.Index('ix_token_trust_id', token.c.trust_id).create()

View File

@ -1,18 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports. Do not use this number for new
# Kilo work. New Kilo work starts after all the placeholders.
def upgrade(migrate_engine):
    pass

View File

@ -1,18 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports. Do not use this number for new
# Kilo work. New Kilo work starts after all the placeholders.
def upgrade(migrate_engine):
    pass

View File

@ -1,18 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports. Do not use this number for new
# Kilo work. New Kilo work starts after all the placeholders.
def upgrade(migrate_engine):
    pass

View File

@ -1,18 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports. Do not use this number for new
# Kilo work. New Kilo work starts after all the placeholders.
def upgrade(migrate_engine):
    pass

View File

@ -1,18 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports. Do not use this number for new
# Kilo work. New Kilo work starts after all the placeholders.
def upgrade(migrate_engine):
    pass

View File

@ -1,41 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql

from keystone.common.sql import migration_helpers

_PROJECT_TABLE_NAME = 'project'
_PARENT_ID_COLUMN_NAME = 'parent_id'


def list_constraints(project_table):
    constraints = [{'table': project_table,
                    'fk_column': _PARENT_ID_COLUMN_NAME,
                    'ref_column': project_table.c.id}]
    return constraints


def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine

    project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
    parent_id = sql.Column(_PARENT_ID_COLUMN_NAME, sql.String(64),
                           nullable=True)
    project_table.create_column(parent_id)

    if migrate_engine.name == 'sqlite':
        return
    migration_helpers.add_constraints(list_constraints(project_table))

View File

@ -1,35 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy

from keystone.common.sql import migration_helpers


def list_constraints(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    assignment_table = sqlalchemy.Table('assignment', meta, autoload=True)
    role_table = sqlalchemy.Table('role', meta, autoload=True)

    constraints = [{'table': assignment_table,
                    'fk_column': 'role_id',
                    'ref_column': role_table.c.id}]
    return constraints


def upgrade(migrate_engine):
    # SQLite does not support constraints, and querying the constraints
    # raises an exception.
    if migrate_engine.name == 'sqlite':
        return
    migration_helpers.remove_constraints(list_constraints(migrate_engine))

View File

@ -1,24 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql

_REGION_TABLE_NAME = 'region'


def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine

    region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
    region_table.drop_column('url')

View File

@ -1,39 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy

from keystone.common.sql import migration_helpers


def list_constraints(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    user_table = sqlalchemy.Table('user', meta, autoload=True)
    group_table = sqlalchemy.Table('group', meta, autoload=True)
    domain_table = sqlalchemy.Table('domain', meta, autoload=True)

    constraints = [{'table': user_table,
                    'fk_column': 'domain_id',
                    'ref_column': domain_table.c.id},
                   {'table': group_table,
                    'fk_column': 'domain_id',
                    'ref_column': domain_table.c.id}]
    return constraints


def upgrade(migrate_engine):
    # SQLite does not support constraints, and querying the constraints
    # raises an exception.
    if migrate_engine.name == 'sqlite':
        return
    migration_helpers.remove_constraints(list_constraints(migrate_engine))

View File

@ -1,46 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql

from keystone.common import sql as ks_sql

WHITELIST_TABLE = 'whitelisted_config'
SENSITIVE_TABLE = 'sensitive_config'


def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine

    whitelist_table = sql.Table(
        WHITELIST_TABLE,
        meta,
        sql.Column('domain_id', sql.String(64), primary_key=True),
        sql.Column('group', sql.String(255), primary_key=True),
        sql.Column('option', sql.String(255), primary_key=True),
        sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
    whitelist_table.create(migrate_engine, checkfirst=True)

    sensitive_table = sql.Table(
        SENSITIVE_TABLE,
        meta,
        sql.Column('domain_id', sql.String(64), primary_key=True),
        sql.Column('group', sql.String(255), primary_key=True),
        sql.Column('option', sql.String(255), primary_key=True),
        sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
    sensitive_table.create(migrate_engine, checkfirst=True)

View File

@ -1,40 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import sqlalchemy as sql


def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine

    service_table = sql.Table('service', meta, autoload=True)
    services = list(service_table.select().execute())

    for service in services:
        if service.extra is not None:
            extra_dict = jsonutils.loads(service.extra)
        else:
            extra_dict = {}

        # Skip records that already have a service name set
        if extra_dict.get('name') is not None:
            continue

        # Default the name to an empty string
        extra_dict['name'] = ''
        new_values = {
            'extra': jsonutils.dumps(extra_dict),
        }
        f = service_table.c.id == service.id
        update = service_table.update().where(f).values(new_values)
        migrate_engine.execute(update)

View File

@ -1,25 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy


def upgrade(migrate_engine):
    # NOTE(viktors): Migration 062 removed FK from `assignment` table, but
    # MySQL silently creates indexes on FK constraints, so we should remove
    # this index manually.
    if migrate_engine.name == 'mysql':
        meta = sqlalchemy.MetaData(bind=migrate_engine)
        table = sqlalchemy.Table('assignment', meta, autoload=True)
        for index in table.indexes:
            if [c.name for c in index.columns] == ['role_id']:
                index.drop(migrate_engine)

View File

@ -20,6 +20,7 @@ from sqlalchemy import orm
from keystone.assignment.backends import sql as assignment_sql
from keystone.common import sql as ks_sql
from keystone.common.sql import migration_helpers
from keystone.identity.mapping_backends import mapping as mapping_backend
LOG = log.getLogger(__name__)
@ -64,12 +65,12 @@ def upgrade(migrate_engine):
sql.Column('id', sql.String(length=64), primary_key=True),
sql.Column('legacy_endpoint_id', sql.String(length=64)),
sql.Column('interface', sql.String(length=8), nullable=False),
sql.Column('region', sql.String(length=255)),
sql.Column('service_id', sql.String(length=64), nullable=False),
sql.Column('url', sql.Text, nullable=False),
sql.Column('extra', ks_sql.JsonBlob.impl),
sql.Column('enabled', sql.Boolean, nullable=False, default=True,
server_default='1'),
sql.Column('region_id', sql.String(length=255), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8')
@ -100,6 +101,7 @@ def upgrade(migrate_engine):
sql.Column('description', sql.Text),
sql.Column('enabled', sql.Boolean),
sql.Column('domain_id', sql.String(length=64), nullable=False),
sql.Column('parent_id', sql.String(64), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8')
@ -177,9 +179,9 @@ def upgrade(migrate_engine):
region = sql.Table(
'region',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('id', sql.String(255), primary_key=True),
sql.Column('description', sql.String(255), nullable=False),
sql.Column('parent_region_id', sql.String(64), nullable=True),
sql.Column('parent_region_id', sql.String(255), nullable=True),
sql.Column('extra', sql.Text()),
mysql_engine='InnoDB',
mysql_charset='utf8')
@ -202,11 +204,45 @@ def upgrade(migrate_engine):
mysql_engine='InnoDB',
mysql_charset='utf8')
mapping = sql.Table(
'id_mapping',
meta,
sql.Column('public_id', sql.String(64), primary_key=True),
sql.Column('domain_id', sql.String(64), nullable=False),
sql.Column('local_id', sql.String(64), nullable=False),
sql.Column('entity_type', sql.Enum(
mapping_backend.EntityType.USER,
mapping_backend.EntityType.GROUP,
name='entity_type'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8')
domain_config_whitelist = sql.Table(
'whitelisted_config',
meta,
sql.Column('domain_id', sql.String(64), primary_key=True),
sql.Column('group', sql.String(255), primary_key=True),
sql.Column('option', sql.String(255), primary_key=True),
sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8')
domain_config_sensitive = sql.Table(
'sensitive_config',
meta,
sql.Column('domain_id', sql.String(64), primary_key=True),
sql.Column('group', sql.String(255), primary_key=True),
sql.Column('option', sql.String(255), primary_key=True),
sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8')
# create all tables
tables = [credential, domain, endpoint, group,
policy, project, role, service,
token, trust, trust_role, user,
user_group_membership, region, assignment]
tables = [credential, domain, endpoint, group, policy, project, role,
service, token, trust, trust_role, user, user_group_membership,
region, assignment, mapping, domain_config_whitelist,
domain_config_sensitive]
for table in tables:
try:
@ -229,11 +265,22 @@ def upgrade(migrate_engine):
name='ixu_project_name_domain_id').create()
migrate.UniqueConstraint(domain.c.name,
name='ixu_domain_name').create()
migrate.UniqueConstraint(mapping.c.domain_id,
mapping.c.local_id,
mapping.c.entity_type,
name='domain_id').create()
# Indexes
sql.Index('ix_token_expires', token.c.expires).create()
sql.Index('ix_token_expires_valid', token.c.expires,
token.c.valid).create()
sql.Index('ix_actor_id', assignment.c.actor_id).create()
sql.Index('ix_token_user_id', token.c.user_id).create()
sql.Index('ix_token_trust_id', token.c.trust_id).create()
# NOTE(stevemar): The two indexes below were named 'service_id' and
# 'group_id' in 050_fk_consistent_indexes.py, and need to be preserved
sql.Index('service_id', endpoint.c.service_id).create()
sql.Index('group_id', user_group_membership.c.group_id).create()
fkeys = [
{'columns': [endpoint.c.service_id],
@ -247,22 +294,27 @@ def upgrade(migrate_engine):
'references':[user.c.id],
'name': 'fk_user_group_membership_user_id'},
{'columns': [user.c.domain_id],
'references': [domain.c.id],
'name': 'fk_user_domain_id'},
{'columns': [group.c.domain_id],
'references': [domain.c.id],
'name': 'fk_group_domain_id'},
{'columns': [project.c.domain_id],
'references': [domain.c.id],
'name': 'fk_project_domain_id'},
{'columns': [assignment.c.role_id],
'references': [role.c.id]}
{'columns': [endpoint.c.region_id],
'references': [region.c.id],
'name': 'fk_endpoint_region_id'},
{'columns': [project.c.parent_id],
'references': [project.c.id],
'name': 'project_parent_id_fkey'},
]
if migrate_engine.name == 'sqlite':
# NOTE(stevemar): We need to keep this FK constraint due to 073, but
# only for sqlite, once we collapse 073 we can remove this constraint
fkeys.append(
{'columns': [assignment.c.role_id],
'references': [role.c.id],
'name': 'fk_assignment_role_id'})
for fkey in fkeys:
migrate.ForeignKeyConstraint(columns=fkey['columns'],
refcolumns=fkey['references'],
@ -272,8 +324,3 @@ def upgrade(migrate_engine):
session = orm.sessionmaker(bind=migrate_engine)()
domain.insert(migration_helpers.get_default_domain()).execute()
session.commit()
def downgrade(migrate_engine):
raise NotImplementedError('Downgrade to pre-Icehouse release db schema is '
'unsupported.')

View File

@ -66,8 +66,8 @@ INITIAL_TABLE_STRUCTURE = {
'id', 'name', 'enabled', 'extra',
],
'endpoint': [
'id', 'legacy_endpoint_id', 'interface', 'region', 'service_id', 'url',
'enabled', 'extra',
'id', 'legacy_endpoint_id', 'interface', 'region_id', 'service_id',
'url', 'enabled', 'extra',
],
'group': [
'id', 'domain_id', 'name', 'description', 'extra',
@ -77,6 +77,7 @@ INITIAL_TABLE_STRUCTURE = {
],
'project': [
'id', 'name', 'extra', 'description', 'enabled', 'domain_id',
'parent_id',
],
'role': [
'id', 'name', 'extra',
@ -107,6 +108,15 @@ INITIAL_TABLE_STRUCTURE = {
'assignment': [
'type', 'actor_id', 'target_id', 'role_id', 'inherited',
],
'id_mapping': [
'public_id', 'domain_id', 'local_id', 'entity_type',
],
'whitelisted_config': [
'domain_id', 'group', 'option', 'value',
],
'sensitive_config': [
'domain_id', 'group', 'option', 'value',
],
}
@ -298,130 +308,44 @@ class SqlUpgradeTests(SqlMigrateBase):
session.execute(insert)
session.commit()
def test_id_mapping(self):
self.upgrade(50)
self.assertTableDoesNotExist('id_mapping')
self.upgrade(51)
self.assertTableExists('id_mapping')
def test_kilo_squash(self):
self.upgrade(67)
def test_region_url_upgrade(self):
self.upgrade(52)
self.assertTableColumns('region',
['id', 'description', 'parent_region_id',
'extra', 'url'])
# In 053 the sizes of the ID and parent region ID columns were changed
table = sqlalchemy.Table('region', self.metadata, autoload=True)
self.assertEqual(255, table.c.id.type.length)
self.assertEqual(255, table.c.parent_region_id.type.length)
table = sqlalchemy.Table('endpoint', self.metadata, autoload=True)
self.assertEqual(255, table.c.region_id.type.length)
def test_endpoint_region_upgrade_columns(self):
self.upgrade(53)
self.assertTableColumns('endpoint',
['id', 'legacy_endpoint_id', 'interface',
'service_id', 'url', 'extra', 'enabled',
'region_id'])
region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
self.assertEqual(255, region_table.c.id.type.length)
self.assertEqual(255, region_table.c.parent_region_id.type.length)
endpoint_table = sqlalchemy.Table('endpoint',
self.metadata,
autoload=True)
self.assertEqual(255, endpoint_table.c.region_id.type.length)
def test_endpoint_region_migration(self):
self.upgrade(52)
session = self.Session()
_small_region_name = '0' * 30
_long_region_name = '0' * 255
_clashing_region_name = '0' * 70
def add_service():
service_id = uuid.uuid4().hex
# Older style service ref, must create by hand
service = {
'id': service_id,
'type': uuid.uuid4().hex
}
self.insert_dict(session, 'service', service)
return service_id
def add_endpoint(service_id, region):
endpoint_id = uuid.uuid4().hex
# Can't use new_endpoint_ref to make the older style endpoint
# so make it by hand.
endpoint = {
'id': endpoint_id,
'interface': uuid.uuid4().hex[:8],
'service_id': service_id,
'url': uuid.uuid4().hex,
'region': region
}
self.insert_dict(session, 'endpoint', endpoint)
return endpoint_id
_service_id_ = add_service()
add_endpoint(_service_id_, region=_long_region_name)
add_endpoint(_service_id_, region=_long_region_name)
add_endpoint(_service_id_, region=_clashing_region_name)
add_endpoint(_service_id_, region=_small_region_name)
add_endpoint(_service_id_, region=None)
# upgrade to 53
session.close()
self.upgrade(53)
session = self.Session()
self.metadata.clear()
region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
self.assertEqual(1, session.query(region_table).
filter_by(id=_long_region_name).count())
self.assertEqual(1, session.query(region_table).
filter_by(id=_clashing_region_name).count())
self.assertEqual(1, session.query(region_table).
filter_by(id=_small_region_name).count())
endpoint_table = sqlalchemy.Table('endpoint',
self.metadata,
autoload=True)
self.assertEqual(5, session.query(endpoint_table).count())
self.assertEqual(2, session.query(endpoint_table).
filter_by(region_id=_long_region_name).count())
self.assertEqual(1, session.query(endpoint_table).
filter_by(region_id=_clashing_region_name).count())
self.assertEqual(1, session.query(endpoint_table).
filter_by(region_id=_small_region_name).count())
def test_add_actor_id_index(self):
self.upgrade(53)
self.upgrade(54)
# In 054 an index was created for the actor_id of the assignment table
table = sqlalchemy.Table('assignment', self.metadata, autoload=True)
index_data = [(idx.name, list(idx.columns.keys()))
for idx in table.indexes]
self.assertIn(('ix_actor_id', ['actor_id']), index_data)
def test_token_user_id_and_trust_id_index_upgrade(self):
self.upgrade(54)
self.upgrade(55)
# In 055 indexes were created for user and trust IDs in the token table
table = sqlalchemy.Table('token', self.metadata, autoload=True)
index_data = [(idx.name, list(idx.columns.keys()))
for idx in table.indexes]
self.assertIn(('ix_token_user_id', ['user_id']), index_data)
self.assertIn(('ix_token_trust_id', ['trust_id']), index_data)
def test_project_parent_id_upgrade(self):
self.upgrade(61)
self.assertTableColumns('project',
['id', 'name', 'extra', 'description',
'enabled', 'domain_id', 'parent_id'])
def test_drop_assignment_role_fk(self):
self.upgrade(61)
self.assertTrue(self.does_fk_exist('assignment', 'role_id'))
self.upgrade(62)
if self.engine.name != 'sqlite':
# SQLite does not support FK deletions (or enforcement)
# In 062 the role ID foreign key was removed from the assignment table
if self.engine.name == "mysql":
self.assertFalse(self.does_fk_exist('assignment', 'role_id'))
# In 064 the domain ID FK was removed from the group and user tables
if self.engine.name != 'sqlite':
# sqlite does not support FK deletions (or enforcement)
self.assertFalse(self.does_fk_exist('group', 'domain_id'))
self.assertFalse(self.does_fk_exist('user', 'domain_id'))
# In 067 the role ID index was removed from the assignment table
if self.engine.name == "mysql":
self.assertFalse(self._does_index_exist('assignment',
'assignment_role_id_fkey'))
def test_insert_assignment_inherited_pk(self):
ASSIGNMENT_TABLE_NAME = 'assignment'
INHERITED_COLUMN_NAME = 'inherited'
@ -502,33 +426,10 @@ class SqlUpgradeTests(SqlMigrateBase):
return True
return False
def test_drop_region_url_upgrade(self):
self.upgrade(63)
self.assertTableColumns('region',
['id', 'description', 'parent_region_id',
'extra'])
def test_domain_fk(self):
self.upgrade(63)
self.assertTrue(self.does_fk_exist('group', 'domain_id'))
self.assertTrue(self.does_fk_exist('user', 'domain_id'))
self.upgrade(64)
if self.engine.name != 'sqlite':
# sqlite does not support FK deletions (or enforcement)
self.assertFalse(self.does_fk_exist('group', 'domain_id'))
self.assertFalse(self.does_fk_exist('user', 'domain_id'))
def test_add_domain_config(self):
whitelisted_table = 'whitelisted_config'
sensitive_table = 'sensitive_config'
self.upgrade(64)
self.assertTableDoesNotExist(whitelisted_table)
self.assertTableDoesNotExist(sensitive_table)
self.upgrade(65)
self.assertTableColumns(whitelisted_table,
['domain_id', 'group', 'option', 'value'])
self.assertTableColumns(sensitive_table,
['domain_id', 'group', 'option', 'value'])
def does_index_exist(self, table_name, index_name):
meta = sqlalchemy.MetaData(bind=self.engine)
table = sqlalchemy.Table(table_name, meta, autoload=True)
return index_name in [idx.name for idx in table.indexes]
def test_endpoint_policy_upgrade(self):
self.assertTableDoesNotExist('policy_association')
@ -679,92 +580,6 @@ class SqlUpgradeTests(SqlMigrateBase):
# that 084 did not create the table.
self.assertTableDoesNotExist('revocation_event')
def test_fixup_service_name_value_upgrade(self):
"""Update service name data from `extra` to empty string."""
def add_service(**extra_data):
service_id = uuid.uuid4().hex
# Older style service ref, must create by hand
service = {
'id': service_id,
'type': uuid.uuid4().hex,
'extra': json.dumps(extra_data),
}
self.insert_dict(session, 'service', service)
return service_id
self.upgrade(65)
session = self.Session()
# Services with extra values having a random attribute and
# different combinations of name
random_attr_name = uuid.uuid4().hex
random_attr_value = uuid.uuid4().hex
random_attr_str = "%s='%s'" % (random_attr_name, random_attr_value)
random_attr_no_name = {random_attr_name: random_attr_value}
random_attr_no_name_str = "%s='%s'" % (random_attr_name,
random_attr_value)
random_attr_name_value = {random_attr_name: random_attr_value,
'name': 'myname'}
random_attr_name_value_str = 'name=myname,%s' % random_attr_str
random_attr_name_empty = {random_attr_name: random_attr_value,
'name': ''}
random_attr_name_empty_str = 'name=,%s' % random_attr_str
random_attr_name_none = {random_attr_name: random_attr_value,
'name': None}
random_attr_name_none_str = 'name=None,%s' % random_attr_str
services = [
(add_service(**random_attr_no_name),
random_attr_name_empty, random_attr_no_name_str),
(add_service(**random_attr_name_value),
random_attr_name_value, random_attr_name_value_str),
(add_service(**random_attr_name_empty),
random_attr_name_empty, random_attr_name_empty_str),
(add_service(**random_attr_name_none),
random_attr_name_empty, random_attr_name_none_str),
]
# NOTE(viktors): Add a service with empty extra field
self.insert_dict(session, 'service',
{'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex})
session.close()
self.upgrade(66)
session = self.Session()
# Verify that the services have the expected values.
self.metadata.clear()
service_table = sqlalchemy.Table('service', self.metadata,
autoload=True)
def fetch_service_extra(service_id):
cols = [service_table.c.extra]
f = service_table.c.id == service_id
s = sqlalchemy.select(cols).where(f)
service = session.execute(s).fetchone()
return json.loads(service.extra)
for service_id, exp_extra, msg in services:
extra = fetch_service_extra(service_id)
self.assertDictEqual(exp_extra, extra, msg)
def _does_index_exist(self, table_name, index_name):
meta = sqlalchemy.MetaData(bind=self.engine)
table = sqlalchemy.Table('assignment', meta, autoload=True)
return index_name in [idx.name for idx in table.indexes]
def test_drop_assignment_role_id_index_mysql(self):
self.upgrade(66)
if self.engine.name == "mysql":
self.assertTrue(self._does_index_exist('assignment',
'assignment_role_id_fkey'))
self.upgrade(67)
if self.engine.name == "mysql":
self.assertFalse(self._does_index_exist('assignment',
'assignment_role_id_fkey'))
def test_project_is_domain_upgrade(self):
self.upgrade(74)
self.assertTableColumns('project',

View File

@ -0,0 +1,6 @@
---
upgrade:
- >
[`bug 1541092 <https://bugs.launchpad.net/keystone/+bug/1541092>`_]
Database schema migrations have been squashed. Only database upgrades from
Kilo and newer are supported.
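As a hedged illustration of what this note means for operators (not part of the change): the existing schema must already report a version at or beyond the squashed Kilo baseline (67) before these migrations run; a pre-Kilo deployment has to upgrade to Kilo first. A sketch assuming the oslo.db helper and a placeholder repo_path:

# Hedged sketch: verify the deployment is already at the Kilo schema or newer
# before attempting an upgrade past this release.
from oslo_db.sqlalchemy import migration


def schema_is_kilo_or_newer(engine, repo_path):
    current = migration.db_version(engine, repo_path, init_version=66)
    return int(current) >= 67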