Migrations squash

SQL migrations for Kilo require the schema version to be at 044,
which is the final version for the Icehouse release. This limits the
change sets that could affect assumptions made in earlier migration
files. Migrating to a schema version earlier than the final Icehouse
migration is no longer supported.

Implements: blueprint kilo-sql-squash

Change-Id: I65c48f1dc336cef14a365e37dc4084a4abc6ee51
Boris Bobrov 2015-03-06 17:26:44 +03:00 committed by Morgan Fainberg
parent 29e0ca84b7
commit 8539ef4f6c
13 changed files with 62 additions and 1905 deletions
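
A minimal sketch (not part of this commit) of the version gate the squash
implies, using the sqlalchemy-migrate API; the database URL and repository
path below are hypothetical placeholders:

    from migrate.versioning import api as versioning_api

    ICEHOUSE_FINAL = 44  # final Icehouse schema version
    DB_URL = 'sqlite:///keystone.db'           # hypothetical database URL
    REPO = 'keystone/common/sql/migrate_repo'  # assumed repository path

    # Deployments must already be at the final Icehouse schema before the
    # squashed Kilo repository can upgrade them any further.
    current = versioning_api.db_version(DB_URL, REPO)
    if current < ICEHOUSE_FINAL:
        raise SystemExit('Upgrade to schema version %d (Icehouse) before '
                         'deploying Kilo; found %d.'
                         % (ICEHOUSE_FINAL, current))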


@@ -14,4 +14,4 @@
# under the License.
-DB_INIT_VERSION = 33
+DB_INIT_VERSION = 43


@@ -1,31 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql


def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine
    token = sql.Table('token', meta, autoload=True)
    idx = sql.Index('ix_token_expires_valid', token.c.expires, token.c.valid)
    idx.create(migrate_engine)


def downgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine
    token = sql.Table('token', meta, autoload=True)
    idx = sql.Index('ix_token_expires_valid', token.c.expires, token.c.valid)
    idx.drop(migrate_engine)
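
For context, the composite index above supports queries that filter on both
columns at once, such as listing tokens that are still valid and not yet
expired. A hedged sketch of such a query; the reflected `token` table and an
open `connection` are assumed to come from surrounding migration-era code:

    import datetime

    import sqlalchemy as sql

    def list_valid_unexpired(token, connection):
        # Both predicates together can be served by ix_token_expires_valid.
        now = datetime.datetime.utcnow()
        query = sql.select([token.c.id]).where(
            (token.c.valid == sql.true()) & (token.c.expires > now))
        return connection.execute(query).fetchall()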


@@ -1,31 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql


def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine
    token = sql.Table('token', meta, autoload=True)
    idx = sql.Index('ix_token_valid', token.c.valid)
    idx.drop(migrate_engine)


def downgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine
    token = sql.Table('token', meta, autoload=True)
    idx = sql.Index('ix_token_valid', token.c.valid)
    idx.create(migrate_engine)


@@ -1,39 +0,0 @@
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql


def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine

    region_table = sql.Table(
        'region',
        meta,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('description', sql.String(255), nullable=False),
        sql.Column('parent_region_id', sql.String(64), nullable=True),
        sql.Column('extra', sql.Text()),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
    region_table.create(migrate_engine, checkfirst=True)


def downgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine
    region = sql.Table('region', meta, autoload=True)
    region.drop(migrate_engine, checkfirst=True)


@@ -1,53 +0,0 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql

from keystone.assignment.backends import sql as assignment_sql

ASSIGNMENT_TABLE = 'assignment'


def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine

    sql.Table('role', meta, autoload=True)
    assignment_table = sql.Table(
        ASSIGNMENT_TABLE,
        meta,
        sql.Column('type', sql.Enum(
            assignment_sql.AssignmentType.USER_PROJECT,
            assignment_sql.AssignmentType.GROUP_PROJECT,
            assignment_sql.AssignmentType.USER_DOMAIN,
            assignment_sql.AssignmentType.GROUP_DOMAIN,
            name='type'),
            nullable=False),
        sql.Column('actor_id', sql.String(64), nullable=False),
        sql.Column('target_id', sql.String(64), nullable=False),
        sql.Column('role_id', sql.String(64), sql.ForeignKey('role.id'),
                   nullable=False),
        sql.Column('inherited', sql.Boolean, default=False, nullable=False),
        sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id', 'role_id'),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
    assignment_table.create(migrate_engine, checkfirst=True)


def downgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine
    assignment = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
    assignment.drop(migrate_engine, checkfirst=True)


@@ -1,233 +0,0 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json

import sqlalchemy as sql

from keystone.assignment.backends import sql as assignment_sql

USER_PROJECT_TABLE = 'user_project_metadata'
GROUP_PROJECT_TABLE = 'group_project_metadata'
USER_DOMAIN_TABLE = 'user_domain_metadata'
GROUP_DOMAIN_TABLE = 'group_domain_metadata'

ASSIGNMENT_TABLE = 'assignment'

GRANT_TABLES = [USER_PROJECT_TABLE, USER_DOMAIN_TABLE,
                GROUP_PROJECT_TABLE, GROUP_DOMAIN_TABLE]


def migrate_grant_table(meta, session, table_name):
    def extract_actor_and_target(table_name, composite_grant):
        if table_name == USER_PROJECT_TABLE:
            return {'type': assignment_sql.AssignmentType.USER_PROJECT,
                    'actor_id': composite_grant.user_id,
                    'target_id': composite_grant.project_id}
        elif table_name == GROUP_PROJECT_TABLE:
            return {'type': assignment_sql.AssignmentType.GROUP_PROJECT,
                    'actor_id': composite_grant.group_id,
                    'target_id': composite_grant.project_id}
        elif table_name == USER_DOMAIN_TABLE:
            return {'type': assignment_sql.AssignmentType.USER_DOMAIN,
                    'actor_id': composite_grant.user_id,
                    'target_id': composite_grant.domain_id}
        else:
            return {'type': assignment_sql.AssignmentType.GROUP_DOMAIN,
                    'actor_id': composite_grant.group_id,
                    'target_id': composite_grant.domain_id}

    def grant_to_grant_dict_list(table_name, composite_grant):
        """Make each role in the list of this entry a separate assignment."""
        json_metadata = json.loads(composite_grant.data)
        role_dict_list = []
        if 'roles' in json_metadata:
            for x in json_metadata['roles']:
                if x.get('id') is None:
                    # Looks like an invalid role, drop it
                    break
                grant = extract_actor_and_target(table_name, composite_grant)
                grant['role_id'] = x.get('id')
                grant['inherited'] = False
                if x.get('inherited_to') == 'projects':
                    grant['inherited'] = True
                role_dict_list.append(grant)
        return role_dict_list

    upgrade_table = sql.Table(table_name, meta, autoload=True)
    assignment_table = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)

    # For each grant in this table, expand it out to be an assignment entry
    # for each role in the metadata
    for grant in session.query(upgrade_table).all():
        for grant_role in grant_to_grant_dict_list(table_name, grant):
            new_entry = assignment_table.insert().values(
                type=grant_role['type'],
                actor_id=grant_role['actor_id'],
                target_id=grant_role['target_id'],
                role_id=grant_role['role_id'],
                inherited=grant_role['inherited'])
            session.execute(new_entry)
            session.commit()

    # Delete all the rows
    session.execute(upgrade_table.delete())
    session.commit()


def downgrade_assignment_table(meta, migrate_engine):
    def add_to_dict_list(metadata, assignment_row):
        """Update a metadata dict list with the role.

        For the assignment row supplied, we need to append the role_id into
        the metadata list of dicts. If the row is inherited, then we mark
        it so in the dict we append.

        """
        new_entry = {'id': assignment_row.role_id}
        if assignment_row.inherited and (
                assignment_row.type ==
                assignment_sql.AssignmentType.USER_DOMAIN or
                assignment_row.type ==
                assignment_sql.AssignmentType.GROUP_DOMAIN):
            new_entry['inherited_to'] = 'projects'

        if metadata is not None:
            json_metadata = json.loads(metadata)
        else:
            json_metadata = {}

        if json_metadata.get('roles') is None:
            json_metadata['roles'] = []

        json_metadata['roles'].append(new_entry)
        return json.dumps(json_metadata)

    def build_user_project_entry(meta, session, row):
        update_table = sql.Table(USER_PROJECT_TABLE, meta, autoload=True)
        q = session.query(update_table)
        q = q.filter_by(user_id=row.actor_id)
        q = q.filter_by(project_id=row.target_id)
        ref = q.first()
        if ref is not None:
            values = {'data': add_to_dict_list(ref.data, row)}
            update = update_table.update().where(
                update_table.c.user_id == ref.user_id).where(
                    update_table.c.project_id == ref.project_id).values(values)
        else:
            values = {'user_id': row.actor_id,
                      'project_id': row.target_id,
                      'data': add_to_dict_list(None, row)}
            update = update_table.insert().values(values)
        return update

    def build_group_project_entry(meta, session, row):
        update_table = sql.Table(GROUP_PROJECT_TABLE, meta, autoload=True)
        q = session.query(update_table)
        q = q.filter_by(group_id=row.actor_id)
        q = q.filter_by(project_id=row.target_id)
        ref = q.first()
        if ref is not None:
            values = {'data': add_to_dict_list(ref.data, row)}
            update = update_table.update().where(
                update_table.c.group_id == ref.group_id).where(
                    update_table.c.project_id == ref.project_id).values(values)
        else:
            values = {'group_id': row.actor_id,
                      'project_id': row.target_id,
                      'data': add_to_dict_list(None, row)}
            update = update_table.insert().values(values)
        return update

    def build_user_domain_entry(meta, session, row):
        update_table = sql.Table(USER_DOMAIN_TABLE, meta, autoload=True)
        q = session.query(update_table)
        q = q.filter_by(user_id=row.actor_id)
        q = q.filter_by(domain_id=row.target_id)
        ref = q.first()
        if ref is not None:
            values = {'data': add_to_dict_list(ref.data, row)}
            update = update_table.update().where(
                update_table.c.user_id == ref.user_id).where(
                    update_table.c.domain_id == ref.domain_id).values(values)
        else:
            values = {'user_id': row.actor_id,
                      'domain_id': row.target_id,
                      'data': add_to_dict_list(None, row)}
            update = update_table.insert().values(values)
        return update

    def build_group_domain_entry(meta, session, row):
        update_table = sql.Table(GROUP_DOMAIN_TABLE, meta, autoload=True)
        q = session.query(update_table)
        q = q.filter_by(group_id=row.actor_id)
        q = q.filter_by(domain_id=row.target_id)
        ref = q.first()
        if ref is not None:
            values = {'data': add_to_dict_list(ref.data, row)}
            update = update_table.update().where(
                update_table.c.group_id == ref.group_id).where(
                    update_table.c.domain_id == ref.domain_id).values(values)
        else:
            values = {'group_id': row.actor_id,
                      'domain_id': row.target_id,
                      'data': add_to_dict_list(None, row)}
            update = update_table.insert().values(values)
        return update

    def build_update(meta, session, row):
        """Build an update or an insert to the correct metadata table."""
        if row.type == assignment_sql.AssignmentType.USER_PROJECT:
            return build_user_project_entry(meta, session, row)
        elif row.type == assignment_sql.AssignmentType.GROUP_PROJECT:
            return build_group_project_entry(meta, session, row)
        elif row.type == assignment_sql.AssignmentType.USER_DOMAIN:
            return build_user_domain_entry(meta, session, row)
        elif row.type == assignment_sql.AssignmentType.GROUP_DOMAIN:
            return build_group_domain_entry(meta, session, row)
        # If the row type doesn't match any that we understand we drop
        # the data.

    session = sql.orm.sessionmaker(bind=migrate_engine)()
    downgrade_table = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
    for assignment in session.query(downgrade_table).all():
        update = build_update(meta, session, assignment)
        if update is not None:
            session.execute(update)
            session.commit()

    # Delete all the rows
    session.execute(downgrade_table.delete())
    session.commit()
    session.close()


def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine
    session = sql.orm.sessionmaker(bind=migrate_engine)()

    for table_name in GRANT_TABLES:
        migrate_grant_table(meta, session, table_name)
    session.close()


def downgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine
    downgrade_assignment_table(meta, migrate_engine)
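
For reference, a pure-Python sketch of the expansion that
migrate_grant_table() performs: one metadata row whose JSON lists N roles
becomes N rows in the assignment table. The sample identifiers below are
hypothetical.

    import json

    metadata_row = {'user_id': 'u1', 'project_id': 'p1',
                    'data': json.dumps({'roles': [
                        {'id': 'r1'},
                        {'id': 'r2', 'inherited_to': 'projects'}]})}

    assignments = []
    for role in json.loads(metadata_row['data'])['roles']:
        assignments.append(
            {'type': 'UserProject',  # stands in for the enum value
             'actor_id': metadata_row['user_id'],
             'target_id': metadata_row['project_id'],
             'role_id': role['id'],
             'inherited': role.get('inherited_to') == 'projects'})
    print(assignments)  # two assignment rows, the second marked inherited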


@@ -1,106 +0,0 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql

USER_PROJECT_TABLE = 'user_project_metadata'
GROUP_PROJECT_TABLE = 'group_project_metadata'
USER_DOMAIN_TABLE = 'user_domain_metadata'
GROUP_DOMAIN_TABLE = 'group_domain_metadata'

GRANT_TABLES = [USER_PROJECT_TABLE, USER_DOMAIN_TABLE,
                GROUP_PROJECT_TABLE, GROUP_DOMAIN_TABLE]


def recreate_grant_tables(meta, migrate_engine):
    sql.Table('user', meta, autoload=True)
    sql.Table('group', meta, autoload=True)
    sql.Table('project', meta, autoload=True)
    sql.Table('domain', meta, autoload=True)

    user_project_metadata_table = sql.Table(
        USER_PROJECT_TABLE,
        meta,
        sql.Column(
            'user_id',
            sql.String(64),
            primary_key=True),
        sql.Column(
            'project_id',
            sql.String(64),
            sql.ForeignKey('project.id'),
            primary_key=True),
        sql.Column('data', sql.Text()))
    user_project_metadata_table.create(migrate_engine, checkfirst=True)

    group_project_metadata_table = sql.Table(
        GROUP_PROJECT_TABLE,
        meta,
        sql.Column(
            'group_id',
            sql.String(64),
            primary_key=True),
        sql.Column(
            'project_id',
            sql.String(64),
            sql.ForeignKey('project.id'),
            primary_key=True),
        sql.Column('data', sql.Text()))
    group_project_metadata_table.create(migrate_engine, checkfirst=True)

    user_domain_metadata_table = sql.Table(
        USER_DOMAIN_TABLE,
        meta,
        sql.Column(
            'user_id',
            sql.String(64),
            primary_key=True),
        sql.Column(
            'domain_id',
            sql.String(64),
            sql.ForeignKey('domain.id'),
            primary_key=True),
        sql.Column('data', sql.Text()))
    user_domain_metadata_table.create(migrate_engine, checkfirst=True)

    group_domain_metadata_table = sql.Table(
        GROUP_DOMAIN_TABLE,
        meta,
        sql.Column(
            'group_id',
            sql.String(64),
            primary_key=True),
        sql.Column(
            'domain_id',
            sql.String(64),
            sql.ForeignKey('domain.id'),
            primary_key=True),
        sql.Column('data', sql.Text()))
    group_domain_metadata_table.create(migrate_engine, checkfirst=True)


def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine

    for table_name in GRANT_TABLES:
        grant_table = sql.Table(table_name, meta, autoload=True)
        grant_table.drop(migrate_engine, checkfirst=True)


def downgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine
    recreate_grant_tables(meta, migrate_engine)


@@ -1,44 +0,0 @@
# Copyright (c) 2014 Matthieu Huin <mhu@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy


def downgrade_trust_table_with_column_drop(meta, migrate_engine):
    trust_table = sqlalchemy.Table('trust', meta, autoload=True)

    # delete trusts with a limited use count, we are downgrading so uses
    # will not be tracked anymore.
    d = trust_table.delete(trust_table.c.remaining_uses >= 0)
    d.execute()
    trust_table.drop_column('remaining_uses')


def upgrade_trust_table(meta, migrate_engine):
    trust_table = sqlalchemy.Table('trust', meta, autoload=True)
    trust_table.create_column(sqlalchemy.Column('remaining_uses',
                                                sqlalchemy.Integer(),
                                                nullable=True))


def upgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    upgrade_trust_table(meta, migrate_engine)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    downgrade_trust_table_with_column_drop(meta, migrate_engine)


@@ -1,168 +0,0 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Adds an `enabled` column to the `endpoint` table.
The enabled value for the `endpoint` table was stored in the `extra` column
as part of a JSON string.
To upgrade, the `enabled` column is added with a default value of ``true``,
then we check all the `extra` JSON for disabled and set the value to ``false``
for those.
Downgrade is essentially the opposite -- we update the JSON with
``"enabled": false`` for any endpoints that are disabled and drop the `enabled`
column.
"""
from oslo_serialization import jsonutils
from oslo_utils import strutils
import sqlalchemy as sql
from sqlalchemy.orm import Session
def _migrate_enabled_from_extra(migrate_engine, endpoint_table):
"""Remove `enabled` from `extra`, put it in the `enabled` column."""
eps = list(endpoint_table.select().execute())
for ep in eps:
extra_dict = jsonutils.loads(ep.extra)
if 'enabled' not in extra_dict:
# `enabled` and `extra` are already as expected.
continue
enabled = extra_dict.pop('enabled')
if enabled is None:
enabled = True
else:
enabled = strutils.bool_from_string(enabled, default=True)
new_values = {
'enabled': enabled,
'extra': jsonutils.dumps(extra_dict),
}
f = endpoint_table.c.id == ep.id
update = endpoint_table.update().where(f).values(new_values)
migrate_engine.execute(update)
def _migrate_enabled_to_extra(migrate_engine, endpoint_table):
"""Get enabled value from 'enabled' column and put it in 'extra' JSON.
Only put the enabled value to the 'extra' JSON if it's False, since the
default is True.
"""
eps = list(endpoint_table.select().execute())
for ep in eps:
if ep.enabled:
# Nothing to do since the endpoint is enabled.
continue
extra_dict = jsonutils.loads(ep.extra)
extra_dict['enabled'] = False
new_values = {
'extra': jsonutils.dumps(extra_dict),
}
f = endpoint_table.c.id == ep.id
update = endpoint_table.update().where(f).values(new_values)
migrate_engine.execute(update)
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
endpoint_table = sql.Table('endpoint', meta, autoload=True)
enabled_column = sql.Column('enabled', sql.Boolean, nullable=False,
default=True, server_default='1')
enabled_column.create(endpoint_table)
_migrate_enabled_from_extra(migrate_engine, endpoint_table)
def _downgrade_endpoint_table_with_copy(meta, migrate_engine):
# Used with databases that don't support dropping a column (e.g., sqlite).
orig_endpoint_table = sql.Table(
'endpoint', meta, autoload=True)
orig_endpoint_table.deregister()
orig_endpoint_table.rename('orig_endpoint')
session = Session(bind=migrate_engine)
with session.transaction:
# Need to load the metadata for the service table since it's used as
# foreign key.
sql.Table(
'service', meta, autoload=True,
autoload_with=session.connection())
endpoint_table = sql.Table(
'endpoint',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('legacy_endpoint_id', sql.String(64)),
sql.Column('interface', sql.String(8), nullable=False),
sql.Column('region', sql.String(255)),
sql.Column(
'service_id', sql.String(64),
sql.ForeignKey('service.id'),
nullable=False),
sql.Column('url', sql.Text(), nullable=False),
sql.Column('extra', sql.Text()))
endpoint_table.create(migrate_engine, checkfirst=True)
orig_endpoint_table = sql.Table(
'orig_endpoint', meta, autoload=True,
autoload_with=session.connection())
for endpoint in session.query(orig_endpoint_table):
new_values = {
'id': endpoint.id,
'legacy_endpoint_id': endpoint.legacy_endpoint_id,
'interface': endpoint.interface,
'region': endpoint.region,
'service_id': endpoint.service_id,
'url': endpoint.url,
'extra': endpoint.extra,
}
session.execute('insert into endpoint (id, legacy_endpoint_id, '
'interface, region, service_id, url, extra) '
'values ( :id, :legacy_endpoint_id, :interface, '
':region, :service_id, :url, :extra);',
new_values)
orig_endpoint_table.drop()
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
endpoint_table = sql.Table('endpoint', meta, autoload=True)
_migrate_enabled_to_extra(migrate_engine, endpoint_table)
if migrate_engine.name == 'sqlite':
meta.clear()
_downgrade_endpoint_table_with_copy(meta, migrate_engine)
return
endpoint_table.c.enabled.drop()
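
A standalone sketch of the `extra` -> `enabled` JSON transform that the
upgrade above applies, runnable without a database; the sample payload is
hypothetical:

    from oslo_serialization import jsonutils
    from oslo_utils import strutils

    def split_enabled(extra_json):
        """Return (enabled, remaining_extra_json) as the upgrade would."""
        extra = jsonutils.loads(extra_json)
        enabled = extra.pop('enabled', True)
        if enabled is None:
            enabled = True
        else:
            enabled = strutils.bool_from_string(enabled, default=True)
        return enabled, jsonutils.dumps(extra)

    # The string 'false' is normalized to False and the key leaves `extra`.
    print(split_enabled('{"enabled": "false", "note": "x"}'))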


@@ -1,90 +0,0 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Relax the uniqueness of `description` column in region table.
The region table has a dedicated column for the region `description`. This
column originally was not nullable and had to be unique. So if a user wanted
to create a region without sending a `description` in the request, they would
experience an SQL error because the `description` column can't be null for a
region. This means that every region had to have a unique description.
To upgrade, we are going to transfer all the data from the existing region
table to a temporary table, drop the original region table, and then finally
rename the temporary table to the correct name.
There is no downgrade path as the original migration has been fixed to not
include the unique constraint on description column.
"""
import migrate
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
_TEMP_REGION_TABLE_NAME = 'temp_region'
_REGION_TABLE_NAME = 'region'
def _migrate_to_new_region_table(meta, migrate_engine, region_table):
# Create a temporary region table to hold data while we recreate the
# new region table without a unique constraint on the description column
session = sessionmaker(bind=migrate_engine)()
temp_region_table = sql.Table(
_TEMP_REGION_TABLE_NAME,
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('description', sql.String(255), nullable=False),
sql.Column('parent_region_id', sql.String(64), nullable=True),
sql.Column('extra', sql.Text()))
temp_region_table.create(migrate_engine, checkfirst=True)
# Migrate the data
for region in list(session.query(region_table)):
session.execute(temp_region_table.insert().values(
id=region.id,
description=region.description,
parent_region_id=region.parent_region_id,
extra=region.extra))
session.commit()
session.close()
# Drop the old region table
region_table.drop(checkfirst=True)
migrate.rename_table(temp_region_table, _REGION_TABLE_NAME, meta.bind)
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
for idx in region_table.indexes:
if ((idx.columns.get('description') == region_table.c.description) and
len(idx.columns) is 1):
# Constraint was found, do the migration.
_migrate_to_new_region_table(meta, migrate_engine, region_table)
break
def downgrade(migrate_engine):
# There is no downgrade option. The unique constraint should not have
# existed and therefore does not need to be re-added. The previous
# migration has been modified to not contain the unique constraint.
pass
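
A hedged sketch (outside this migration) of checking whether `description`
still carries a single-column index, using SQLAlchemy's reflection inspector;
the engine URL is a hypothetical placeholder:

    import sqlalchemy as sql
    from sqlalchemy.engine import reflection

    engine = sql.create_engine('sqlite:///keystone.db')  # hypothetical URL
    inspector = reflection.Inspector.from_engine(engine)
    # Report any index that covers exactly the `description` column.
    for idx in inspector.get_indexes('region'):
        if idx['column_names'] == ['description']:
            print('single-column description index:', idx['name'])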


@@ -1,14 +1,15 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import migrate
from oslo_config import cfg
@@ -16,6 +17,7 @@ from oslo_log import log
import sqlalchemy as sql
from sqlalchemy import orm
+from keystone.assignment.backends import sql as assignment_sql
from keystone.common import sql as ks_sql
from keystone.common.sql import migration_helpers
@@ -66,6 +68,8 @@ def upgrade(migrate_engine):
        sql.Column('service_id', sql.String(length=64), nullable=False),
        sql.Column('url', sql.Text, nullable=False),
        sql.Column('extra', ks_sql.JsonBlob.impl),
+        sql.Column('enabled', sql.Boolean, nullable=False, default=True,
+                   server_default='1'),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
@@ -79,22 +83,6 @@ def upgrade(migrate_engine):
        mysql_engine='InnoDB',
        mysql_charset='utf8')

-    group_domain_metadata = sql.Table(
-        'group_domain_metadata', meta,
-        sql.Column('group_id', sql.String(length=64), primary_key=True),
-        sql.Column('domain_id', sql.String(length=64), primary_key=True),
-        sql.Column('data', ks_sql.JsonBlob.impl),
-        mysql_engine='InnoDB',
-        mysql_charset='utf8')
-
-    group_project_metadata = sql.Table(
-        'group_project_metadata', meta,
-        sql.Column('group_id', sql.String(length=64), primary_key=True),
-        sql.Column('project_id', sql.String(length=64), primary_key=True),
-        sql.Column('data', ks_sql.JsonBlob.impl),
-        mysql_engine='InnoDB',
-        mysql_charset='utf8')
-
    policy = sql.Table(
        'policy', meta,
        sql.Column('id', sql.String(length=64), primary_key=True),
@@ -127,6 +115,8 @@ def upgrade(migrate_engine):
        'service', meta,
        sql.Column('id', sql.String(length=64), primary_key=True),
        sql.Column('type', sql.String(length=255)),
+        sql.Column('enabled', sql.Boolean, nullable=False, default=True,
+                   server_default='1'),
        sql.Column('extra', ks_sql.JsonBlob.impl),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
@@ -151,6 +141,7 @@ def upgrade(migrate_engine):
        sql.Column('impersonation', sql.Boolean, nullable=False),
        sql.Column('deleted_at', sql.DateTime),
        sql.Column('expires_at', sql.DateTime),
+        sql.Column('remaining_uses', sql.Integer, nullable=True),
        sql.Column('extra', ks_sql.JsonBlob.impl),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
@@ -176,14 +167,6 @@ def upgrade(migrate_engine):
        mysql_engine='InnoDB',
        mysql_charset='utf8')

-    user_domain_metadata = sql.Table(
-        'user_domain_metadata', meta,
-        sql.Column('user_id', sql.String(length=64), primary_key=True),
-        sql.Column('domain_id', sql.String(length=64), primary_key=True),
-        sql.Column('data', ks_sql.JsonBlob.impl),
-        mysql_engine='InnoDB',
-        mysql_charset='utf8')
-
    user_group_membership = sql.Table(
        'user_group_membership', meta,
        sql.Column('user_id', sql.String(length=64), primary_key=True),
@@ -191,19 +174,39 @@ def upgrade(migrate_engine):
        mysql_engine='InnoDB',
        mysql_charset='utf8')

-    user_project_metadata = sql.Table(
-        'user_project_metadata', meta,
-        sql.Column('user_id', sql.String(length=64), primary_key=True),
-        sql.Column('project_id', sql.String(length=64), primary_key=True),
-        sql.Column('data', ks_sql.JsonBlob.impl),
-        mysql_engine='InnoDB',
-        mysql_charset='utf8')
+    region = sql.Table(
+        'region',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('description', sql.String(255), nullable=False),
+        sql.Column('parent_region_id', sql.String(64), nullable=True),
+        sql.Column('extra', sql.Text()),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    assignment = sql.Table(
+        'assignment',
+        meta,
+        sql.Column('type', sql.Enum(
+            assignment_sql.AssignmentType.USER_PROJECT,
+            assignment_sql.AssignmentType.GROUP_PROJECT,
+            assignment_sql.AssignmentType.USER_DOMAIN,
+            assignment_sql.AssignmentType.GROUP_DOMAIN,
+            name='type'),
+            nullable=False),
+        sql.Column('actor_id', sql.String(64), nullable=False),
+        sql.Column('target_id', sql.String(64), nullable=False),
+        sql.Column('role_id', sql.String(64), nullable=False),
+        sql.Column('inherited', sql.Boolean, default=False, nullable=False),
+        sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id', 'role_id'),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
    # create all tables
-    tables = [credential, domain, endpoint, group, group_domain_metadata,
-              group_project_metadata, policy, project, role, service,
-              token, trust, trust_role, user, user_domain_metadata,
-              user_group_membership, user_project_metadata]
+    tables = [credential, domain, endpoint, group,
+              policy, project, role, service,
+              token, trust, trust_role, user,
+              user_group_membership, region, assignment]

    for table in tables:
        try:
@@ -229,25 +232,10 @@ def upgrade(migrate_engine):
    # Indexes
    sql.Index('ix_token_expires', token.c.expires).create()
-    sql.Index('ix_token_valid', token.c.valid).create()
+    sql.Index('ix_token_expires_valid', token.c.expires,
+              token.c.valid).create()

    fkeys = [
-        {'columns': [user_project_metadata.c.project_id],
-         'references': [project.c.id],
-         'name': 'fk_user_project_metadata_project_id'},
-        {'columns': [user_domain_metadata.c.domain_id],
-         'references': [domain.c.id],
-         'name': 'fk_user_domain_metadata_domain_id'},
-        {'columns': [group_project_metadata.c.project_id],
-         'references': [project.c.id],
-         'name': 'fk_group_project_metadata_project_id'},
-        {'columns': [group_domain_metadata.c.domain_id],
-         'references': [domain.c.id],
-         'name': 'fk_group_domain_metadata_domain_id'},
        {'columns': [endpoint.c.service_id],
         'references': [service.c.id]},
@@ -269,7 +257,10 @@ def upgrade(migrate_engine):
        {'columns': [project.c.domain_id],
         'references': [domain.c.id],
-         'name': 'fk_project_domain_id'}
+         'name': 'fk_project_domain_id'},
+        {'columns': [assignment.c.role_id],
+         'references': [role.c.id]}
    ]

    for fkey in fkeys:
@@ -284,5 +275,5 @@ def upgrade(migrate_engine):

def downgrade(migrate_engine):
-    raise NotImplementedError('Downgrade to pre-Havana release db schema is '
+    raise NotImplementedError('Downgrade to pre-Icehouse release db schema is '
                              'unsupported.')


@@ -1,148 +0,0 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Adds an `enabled` column to the `service` table.
The enabled value for the `service` table was stored in the `extra` column
as part of a JSON string.
To upgrade, the `enabled` column is added with a default value of ``true``,
then we check all the `extra` JSON for disabled and set the value to ``false``
for those.
Downgrade is essentially the opposite -- we update the JSON with
``"enabled": false`` for any services that are disabled and drop the `enabled`
column.
"""
from oslo_serialization import jsonutils
from oslo_utils import strutils
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
def _migrate_enabled_from_extra(migrate_engine, service_table):
"""Remove `enabled` from `extra`, put it in the `enabled` column."""
services = list(service_table.select().execute())
for service in services:
extra_dict = jsonutils.loads(service.extra)
if 'enabled' not in extra_dict:
# `enabled` and `extra` are already as expected.
continue
enabled = extra_dict.pop('enabled')
if enabled is None:
enabled = True
else:
enabled = strutils.bool_from_string(enabled, default=True)
new_values = {
'enabled': enabled,
'extra': jsonutils.dumps(extra_dict),
}
f = service_table.c.id == service.id
update = service_table.update().where(f).values(new_values)
migrate_engine.execute(update)
def _migrate_enabled_to_extra(migrate_engine, service_table):
"""Get enabled value from 'enabled' column and put it in 'extra' JSON.
Only put the enabled value to the 'extra' JSON if it's False, since the
default is True.
"""
services = list(service_table.select().execute())
for service in services:
if service.enabled:
# Nothing to do since the service is enabled.
continue
extra_dict = jsonutils.loads(service.extra)
extra_dict['enabled'] = False
new_values = {
'extra': jsonutils.dumps(extra_dict),
}
f = service_table.c.id == service.id
update = service_table.update().where(f).values(new_values)
migrate_engine.execute(update)
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
service_table = sql.Table('service', meta, autoload=True)
enabled_column = sql.Column('enabled', sql.Boolean, nullable=False,
default=True, server_default='1')
enabled_column.create(service_table)
_migrate_enabled_from_extra(migrate_engine, service_table)
def _downgrade_service_table_with_copy(meta, migrate_engine):
# Used with databases that don't support dropping a column (e.g., sqlite).
maker = sessionmaker(bind=migrate_engine)
session = maker()
orig_service_table = sql.Table('service', meta, autoload=True)
orig_service_table.deregister()
orig_service_table.rename('orig_service')
service_table = sql.Table(
'service',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('type', sql.String(255)),
sql.Column('extra', sql.Text()))
service_table.create(migrate_engine, checkfirst=True)
with session.transaction:
for service in session.query(orig_service_table):
new_values = {
'id': service.id,
'type': service.type,
'extra': service.extra,
}
session.execute('insert into service (id, type, extra) '
'values ( :id, :type, :extra);',
new_values)
session.close()
orig_service_table.drop()
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
service_table = sql.Table('service', meta, autoload=True)
_migrate_enabled_to_extra(migrate_engine, service_table)
if migrate_engine.name == 'sqlite':
meta.clear()
_downgrade_service_table_with_copy(meta, migrate_engine)
return
service_table.c.enabled.drop()
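
SQLite cannot drop a column with a plain ALTER TABLE, which is why the
downgrade above falls back to the rename-copy-drop pattern. A minimal
standalone sketch of the same pattern with the stdlib sqlite3 module; the
table contents are hypothetical:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE service (id TEXT PRIMARY KEY, type TEXT, '
                 'extra TEXT, enabled BOOLEAN)')
    conn.execute("INSERT INTO service VALUES ('s1', 'identity', '{}', 1)")

    # Rename the original, recreate without the column, copy, then drop.
    conn.execute('ALTER TABLE service RENAME TO orig_service')
    conn.execute('CREATE TABLE service (id TEXT PRIMARY KEY, type TEXT, '
                 'extra TEXT)')
    conn.execute('INSERT INTO service (id, type, extra) '
                 'SELECT id, type, extra FROM orig_service')
    conn.execute('DROP TABLE orig_service')
    print(conn.execute('SELECT * FROM service').fetchall())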


@@ -43,7 +43,6 @@ from sqlalchemy.engine import reflection
import sqlalchemy.exc
from sqlalchemy import schema
-from keystone.assignment.backends import sql as assignment_sql
from keystone.common import sql
from keystone.common.sql import migrate_repo
from keystone.common.sql import migration_helpers
@@ -70,17 +69,11 @@ INITIAL_TABLE_STRUCTURE = {
    ],
    'endpoint': [
        'id', 'legacy_endpoint_id', 'interface', 'region', 'service_id', 'url',
-        'extra',
+        'enabled', 'extra',
    ],
    'group': [
        'id', 'domain_id', 'name', 'description', 'extra',
    ],
-    'group_domain_metadata': [
-        'group_id', 'domain_id', 'data',
-    ],
-    'group_project_metadata': [
-        'group_id', 'project_id', 'data',
-    ],
    'policy': [
        'id', 'type', 'blob', 'extra',
    ],
@@ -91,14 +84,14 @@ INITIAL_TABLE_STRUCTURE = {
        'id', 'name', 'extra',
    ],
    'service': [
-        'id', 'type', 'extra',
+        'id', 'type', 'extra', 'enabled',
    ],
    'token': [
        'id', 'expires', 'extra', 'valid', 'trust_id', 'user_id',
    ],
    'trust': [
        'id', 'trustor_user_id', 'trustee_user_id', 'project_id',
-        'impersonation', 'deleted_at', 'expires_at', 'extra',
+        'impersonation', 'deleted_at', 'expires_at', 'remaining_uses', 'extra',
    ],
    'trust_role': [
        'trust_id', 'role_id',
@@ -107,14 +100,14 @@ INITIAL_TABLE_STRUCTURE = {
        'id', 'name', 'extra', 'password', 'enabled', 'domain_id',
        'default_project_id',
    ],
-    'user_domain_metadata': [
-        'user_id', 'domain_id', 'data',
-    ],
    'user_group_membership': [
        'user_id', 'group_id',
    ],
-    'user_project_metadata': [
-        'user_id', 'project_id', 'data',
-    ],
+    'region': [
+        'id', 'description', 'parent_region_id', 'extra',
+    ],
+    'assignment': [
+        'type', 'actor_id', 'target_id', 'role_id', 'inherited',
+    ],
}
@@ -365,7 +358,7 @@ class SqlUpgradeTests(SqlMigrateBase):
        table_set.remove('migrate_version')

        self.assertSetEqual(initial_table_set, table_set)
-        # Downgrade to before Havana's release schema version (036) is not
+        # Downgrade to before Icehouse's release schema version (044) is not
        # supported. A NotImplementedError should be raised when attempting to
        # downgrade.
        self.assertRaises(NotImplementedError, self.downgrade,
@ -382,890 +375,6 @@ class SqlUpgradeTests(SqlMigrateBase):
session.execute(insert)
session.commit()
def test_region_migration(self):
self.assertTableDoesNotExist('region')
self.upgrade(37)
self.assertTableExists('region')
self.downgrade(36)
self.assertTableDoesNotExist('region')
def test_assignment_table_migration(self):
def create_base_data(session):
domain_table = sqlalchemy.Table('domain', self.metadata,
autoload=True)
user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
group_table = sqlalchemy.Table('group', self.metadata,
autoload=True)
role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
project_table = sqlalchemy.Table(
'project', self.metadata, autoload=True)
base_data = {}
# Create a Domain
base_data['domain'] = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True}
session.execute(domain_table.insert().values(base_data['domain']))
# Create another Domain
base_data['domain2'] = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True}
session.execute(domain_table.insert().values(base_data['domain2']))
# Create a Project
base_data['project'] = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': base_data['domain']['id'],
'extra': "{}"}
session.execute(
project_table.insert().values(base_data['project']))
# Create another Project
base_data['project2'] = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': base_data['domain']['id'],
'extra': "{}"}
session.execute(
project_table.insert().values(base_data['project2']))
# Create a User
base_data['user'] = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': base_data['domain']['id'],
'password': uuid.uuid4().hex,
'enabled': True,
'extra': "{}"}
session.execute(user_table.insert().values(base_data['user']))
# Create a Group
base_data['group'] = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': base_data['domain']['id'],
'extra': "{}"}
session.execute(group_table.insert().values(base_data['group']))
# Create roles
base_data['roles'] = []
for _ in range(9):
role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
session.execute(role_table.insert().values(role))
base_data['roles'].append(role)
return base_data
def populate_grants(session, base_data):
user_project_table = sqlalchemy.Table(
'user_project_metadata', self.metadata, autoload=True)
user_domain_table = sqlalchemy.Table(
'user_domain_metadata', self.metadata, autoload=True)
group_project_table = sqlalchemy.Table(
'group_project_metadata', self.metadata, autoload=True)
group_domain_table = sqlalchemy.Table(
'group_domain_metadata', self.metadata, autoload=True)
# Grant a role to user on project
grant = {'user_id': base_data['user']['id'],
'project_id': base_data['project']['id'],
'data': json.dumps(
{'roles': [{'id': base_data['roles'][0]['id']}]})}
session.execute(user_project_table.insert().values(grant))
# Grant two roles to user on project2
grant = {'user_id': base_data['user']['id'],
'project_id': base_data['project2']['id'],
'data': json.dumps(
{'roles': [{'id': base_data['roles'][1]['id']},
{'id': base_data['roles'][2]['id']}]})}
session.execute(user_project_table.insert().values(grant))
# Grant role to group on project
grant = {'group_id': base_data['group']['id'],
'project_id': base_data['project']['id'],
'data': json.dumps(
{'roles': [{'id': base_data['roles'][3]['id']}]})}
session.execute(group_project_table.insert().values(grant))
# Grant two roles to group on project2
grant = {'group_id': base_data['group']['id'],
'project_id': base_data['project2']['id'],
'data': json.dumps(
{'roles': [{'id': base_data['roles'][4]['id']},
{'id': base_data['roles'][5]['id']}]})}
session.execute(group_project_table.insert().values(grant))
# Grant two roles to group on domain, one inherited, one not
grant = {'group_id': base_data['group']['id'],
'domain_id': base_data['domain']['id'],
'data': json.dumps(
{'roles': [{'id': base_data['roles'][6]['id']},
{'id': base_data['roles'][7]['id'],
'inherited_to': 'projects'}]})}
session.execute(group_domain_table.insert().values(grant))
# Grant inherited role to user on domain
grant = {'user_id': base_data['user']['id'],
'domain_id': base_data['domain']['id'],
'data': json.dumps(
{'roles': [{'id': base_data['roles'][8]['id'],
'inherited_to': 'projects'}]})}
session.execute(user_domain_table.insert().values(grant))
# Grant two non-inherited roles to user on domain2, using roles
# that are also assigned to other actors/targets
grant = {'user_id': base_data['user']['id'],
'domain_id': base_data['domain2']['id'],
'data': json.dumps(
{'roles': [{'id': base_data['roles'][6]['id']},
{'id': base_data['roles'][7]['id']}]})}
session.execute(user_domain_table.insert().values(grant))
session.commit()
def check_grants(session, base_data):
user_project_table = sqlalchemy.Table(
'user_project_metadata', self.metadata, autoload=True)
user_domain_table = sqlalchemy.Table(
'user_domain_metadata', self.metadata, autoload=True)
group_project_table = sqlalchemy.Table(
'group_project_metadata', self.metadata, autoload=True)
group_domain_table = sqlalchemy.Table(
'group_domain_metadata', self.metadata, autoload=True)
s = sqlalchemy.select([user_project_table.c.data]).where(
(user_project_table.c.user_id == base_data['user']['id']) &
(user_project_table.c.project_id ==
base_data['project']['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(1, len(data['roles']))
self.assertIn({'id': base_data['roles'][0]['id']}, data['roles'])
s = sqlalchemy.select([user_project_table.c.data]).where(
(user_project_table.c.user_id == base_data['user']['id']) &
(user_project_table.c.project_id ==
base_data['project2']['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(2, len(data['roles']))
self.assertIn({'id': base_data['roles'][1]['id']}, data['roles'])
self.assertIn({'id': base_data['roles'][2]['id']}, data['roles'])
s = sqlalchemy.select([group_project_table.c.data]).where(
(group_project_table.c.group_id == base_data['group']['id']) &
(group_project_table.c.project_id ==
base_data['project']['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(1, len(data['roles']))
self.assertIn({'id': base_data['roles'][3]['id']}, data['roles'])
s = sqlalchemy.select([group_project_table.c.data]).where(
(group_project_table.c.group_id == base_data['group']['id']) &
(group_project_table.c.project_id ==
base_data['project2']['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(2, len(data['roles']))
self.assertIn({'id': base_data['roles'][4]['id']}, data['roles'])
self.assertIn({'id': base_data['roles'][5]['id']}, data['roles'])
s = sqlalchemy.select([group_domain_table.c.data]).where(
(group_domain_table.c.group_id == base_data['group']['id']) &
(group_domain_table.c.domain_id == base_data['domain']['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(2, len(data['roles']))
self.assertIn({'id': base_data['roles'][6]['id']}, data['roles'])
self.assertIn({'id': base_data['roles'][7]['id'],
'inherited_to': 'projects'}, data['roles'])
s = sqlalchemy.select([user_domain_table.c.data]).where(
(user_domain_table.c.user_id == base_data['user']['id']) &
(user_domain_table.c.domain_id == base_data['domain']['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(1, len(data['roles']))
self.assertIn({'id': base_data['roles'][8]['id'],
'inherited_to': 'projects'}, data['roles'])
s = sqlalchemy.select([user_domain_table.c.data]).where(
(user_domain_table.c.user_id == base_data['user']['id']) &
(user_domain_table.c.domain_id == base_data['domain2']['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(2, len(data['roles']))
self.assertIn({'id': base_data['roles'][6]['id']}, data['roles'])
self.assertIn({'id': base_data['roles'][7]['id']}, data['roles'])
def check_assignments(session, base_data):
def check_assignment_type(refs, type):
for ref in refs:
self.assertEqual(type, ref.type)
assignment_table = sqlalchemy.Table(
'assignment', self.metadata, autoload=True)
refs = session.query(assignment_table).all()
self.assertEqual(11, len(refs))
q = session.query(assignment_table)
q = q.filter_by(actor_id=base_data['user']['id'])
q = q.filter_by(target_id=base_data['project']['id'])
refs = q.all()
self.assertEqual(1, len(refs))
self.assertEqual(base_data['roles'][0]['id'], refs[0].role_id)
self.assertFalse(refs[0].inherited)
check_assignment_type(refs,
assignment_sql.AssignmentType.USER_PROJECT)
q = session.query(assignment_table)
q = q.filter_by(actor_id=base_data['user']['id'])
q = q.filter_by(target_id=base_data['project2']['id'])
refs = q.all()
self.assertEqual(2, len(refs))
role_ids = [base_data['roles'][1]['id'],
base_data['roles'][2]['id']]
self.assertIn(refs[0].role_id, role_ids)
self.assertIn(refs[1].role_id, role_ids)
self.assertFalse(refs[0].inherited)
self.assertFalse(refs[1].inherited)
check_assignment_type(refs,
assignment_sql.AssignmentType.USER_PROJECT)
q = session.query(assignment_table)
q = q.filter_by(actor_id=base_data['group']['id'])
q = q.filter_by(target_id=base_data['project']['id'])
refs = q.all()
self.assertEqual(1, len(refs))
self.assertEqual(base_data['roles'][3]['id'], refs[0].role_id)
self.assertFalse(refs[0].inherited)
check_assignment_type(refs,
assignment_sql.AssignmentType.GROUP_PROJECT)
q = session.query(assignment_table)
q = q.filter_by(actor_id=base_data['group']['id'])
q = q.filter_by(target_id=base_data['project2']['id'])
refs = q.all()
self.assertEqual(2, len(refs))
role_ids = [base_data['roles'][4]['id'],
base_data['roles'][5]['id']]
self.assertIn(refs[0].role_id, role_ids)
self.assertIn(refs[1].role_id, role_ids)
self.assertFalse(refs[0].inherited)
self.assertFalse(refs[1].inherited)
check_assignment_type(refs,
assignment_sql.AssignmentType.GROUP_PROJECT)
q = session.query(assignment_table)
q = q.filter_by(actor_id=base_data['group']['id'])
q = q.filter_by(target_id=base_data['domain']['id'])
refs = q.all()
self.assertEqual(2, len(refs))
role_ids = [base_data['roles'][6]['id'],
base_data['roles'][7]['id']]
self.assertIn(refs[0].role_id, role_ids)
self.assertIn(refs[1].role_id, role_ids)
if refs[0].role_id == base_data['roles'][7]['id']:
self.assertTrue(refs[0].inherited)
self.assertFalse(refs[1].inherited)
else:
self.assertTrue(refs[1].inherited)
self.assertFalse(refs[0].inherited)
check_assignment_type(refs,
assignment_sql.AssignmentType.GROUP_DOMAIN)
q = session.query(assignment_table)
q = q.filter_by(actor_id=base_data['user']['id'])
q = q.filter_by(target_id=base_data['domain']['id'])
refs = q.all()
self.assertEqual(1, len(refs))
self.assertEqual(base_data['roles'][8]['id'], refs[0].role_id)
self.assertTrue(refs[0].inherited)
check_assignment_type(refs,
assignment_sql.AssignmentType.USER_DOMAIN)
q = session.query(assignment_table)
q = q.filter_by(actor_id=base_data['user']['id'])
q = q.filter_by(target_id=base_data['domain2']['id'])
refs = q.all()
self.assertEqual(2, len(refs))
role_ids = [base_data['roles'][6]['id'],
base_data['roles'][7]['id']]
self.assertIn(refs[0].role_id, role_ids)
self.assertIn(refs[1].role_id, role_ids)
self.assertFalse(refs[0].inherited)
self.assertFalse(refs[1].inherited)
check_assignment_type(refs,
assignment_sql.AssignmentType.USER_DOMAIN)
self.upgrade(37)
session = self.Session()
self.assertTableDoesNotExist('assignment')
base_data = create_base_data(session)
populate_grants(session, base_data)
check_grants(session, base_data)
session.commit()
session.close()
self.upgrade(40)
session = self.Session()
self.assertTableExists('assignment')
self.assertTableDoesNotExist('user_project_metadata')
self.assertTableDoesNotExist('group_project_metadata')
self.assertTableDoesNotExist('user_domain_metadata')
self.assertTableDoesNotExist('group_domain_metadata')
check_assignments(session, base_data)
session.close()
self.downgrade(37)
session = self.Session()
self.assertTableDoesNotExist('assignment')
check_grants(session, base_data)
session.close()
def test_limited_trusts_upgrade(self):
# make sure that the remaining_uses column is created
self.upgrade(41)
self.assertTableColumns('trust',
['id', 'trustor_user_id',
'trustee_user_id',
'project_id', 'impersonation',
'deleted_at',
'expires_at', 'extra',
'remaining_uses'])
def test_limited_trusts_downgrade(self):
# make sure that the remaining_uses column is removed
self.upgrade(41)
self.downgrade(40)
self.assertTableColumns('trust',
['id', 'trustor_user_id',
'trustee_user_id',
'project_id', 'impersonation',
'deleted_at',
'expires_at', 'extra'])
def test_limited_trusts_downgrade_trusts_cleanup(self):
# make sure that only trusts with unlimited uses are kept in the
# downgrade
self.upgrade(41)
session = self.Session()
limited_trust = {
'id': uuid.uuid4().hex,
'trustor_user_id': uuid.uuid4().hex,
'trustee_user_id': uuid.uuid4().hex,
'project_id': uuid.uuid4().hex,
'impersonation': True,
'remaining_uses': 5
}
consumed_trust = {
'id': uuid.uuid4().hex,
'trustor_user_id': uuid.uuid4().hex,
'trustee_user_id': uuid.uuid4().hex,
'project_id': uuid.uuid4().hex,
'impersonation': True,
'remaining_uses': 0
}
unlimited_trust = {
'id': uuid.uuid4().hex,
'trustor_user_id': uuid.uuid4().hex,
'trustee_user_id': uuid.uuid4().hex,
'project_id': uuid.uuid4().hex,
'impersonation': True,
'remaining_uses': None
}
self.insert_dict(session, 'trust', limited_trust)
self.insert_dict(session, 'trust', consumed_trust)
self.insert_dict(session, 'trust', unlimited_trust)
trust_table = sqlalchemy.Table(
'trust', self.metadata, autoload=True)
# we should have 3 trusts in base
self.assertEqual(3, session.query(trust_table).count())
session.close()
self.downgrade(40)
session = self.Session()
trust_table = sqlalchemy.Table(
'trust', self.metadata, autoload=True)
# Now only one trust remains ...
self.assertEqual(1, session.query(trust_table.columns.id).count())
# ... and this trust is the one that was not limited in uses
self.assertEqual(
unlimited_trust['id'],
session.query(trust_table.columns.id).one()[0])
def test_upgrade_service_enabled_cols(self):
"""Migration 44 added `enabled` column to `service` table."""
self.upgrade(44)
# Verify that there's an 'enabled' field.
exp_cols = ['id', 'type', 'extra', 'enabled']
self.assertTableColumns('service', exp_cols)
def test_downgrade_service_enabled_cols(self):
"""Check columns when downgrade to migration 43.
The downgrade from migration 44 removes the `enabled` column from the
`service` table.
"""
self.upgrade(44)
self.downgrade(43)
exp_cols = ['id', 'type', 'extra']
self.assertTableColumns('service', exp_cols)
def test_upgrade_service_enabled_data(self):
"""Migration 44 has to migrate data from `extra` to `enabled`."""
def add_service(**extra_data):
service_id = uuid.uuid4().hex
service = {
'id': service_id,
'type': uuid.uuid4().hex,
'extra': json.dumps(extra_data),
}
self.insert_dict(session, 'service', service)
return service_id
self.upgrade(43)
session = self.Session()
# Different services with expected enabled and extra values, and a
# description.
random_attr_name = uuid.uuid4().hex
random_attr_value = uuid.uuid4().hex
random_attr = {random_attr_name: random_attr_value}
random_attr_str = "%s='%s'" % (random_attr_name, random_attr_value)
random_attr_enabled_false = {random_attr_name: random_attr_value,
'enabled': False}
random_attr_enabled_false_str = 'enabled=False,%s' % random_attr_str
services = [
# Some values for True.
(add_service(), (True, {}), 'no enabled'),
(add_service(enabled=True), (True, {}), 'enabled=True'),
(add_service(enabled='true'), (True, {}), "enabled='true'"),
(add_service(**random_attr),
(True, random_attr), random_attr_str),
(add_service(enabled=None), (True, {}), 'enabled=None'),
# Some values for False.
(add_service(enabled=False), (False, {}), 'enabled=False'),
(add_service(enabled='false'), (False, {}), "enabled='false'"),
(add_service(enabled='0'), (False, {}), "enabled='0'"),
(add_service(**random_attr_enabled_false),
(False, random_attr), random_attr_enabled_false_str),
]
session.close()
self.upgrade(44)
session = self.Session()
# Verify that the services have the expected values.
self.metadata.clear()
service_table = sqlalchemy.Table('service', self.metadata,
autoload=True)
def fetch_service(service_id):
cols = [service_table.c.enabled, service_table.c.extra]
f = service_table.c.id == service_id
s = sqlalchemy.select(cols).where(f)
ep = session.execute(s).fetchone()
return ep.enabled, json.loads(ep.extra)
for service_id, exp, msg in services:
exp_enabled, exp_extra = exp
enabled, extra = fetch_service(service_id)
self.assertEqual(exp_enabled, enabled, msg)
self.assertEqual(exp_extra, extra, msg)
def test_downgrade_service_enabled_data(self):
"""Downgrade from migration 44 migrates data.
Downgrade from migration 44 migrates data from `enabled` to
`extra`. Any disabled services have 'enabled': False put into 'extra'.
"""
def add_service(enabled=True, **extra_data):
service_id = uuid.uuid4().hex
service = {
'id': service_id,
'type': uuid.uuid4().hex,
'extra': json.dumps(extra_data),
'enabled': enabled
}
self.insert_dict(session, 'service', service)
return service_id
self.upgrade(44)
session = self.Session()
# Insert some services using the new format.
# We'll need a service entry since it's the foreign key for services.
service_id = add_service(True)
new_service = (lambda enabled, **extra_data:
add_service(enabled, **extra_data))
# Different services with expected extra values, and a
# description.
services = [
# True tests
(new_service(True), {}, 'enabled'),
(new_service(True, something='whatever'),
{'something': 'whatever'},
"something='whatever'"),
# False tests
(new_service(False), {'enabled': False}, 'enabled=False'),
(new_service(False, something='whatever'),
{'enabled': False, 'something': 'whatever'},
"enabled=False, something='whatever'"),
]
session.close()
self.downgrade(43)
session = self.Session()
# Verify that the services have the expected values.
self.metadata.clear()
service_table = sqlalchemy.Table('service', self.metadata,
autoload=True)
def fetch_service(service_id):
cols = [service_table.c.extra]
f = service_table.c.id == service_id
s = sqlalchemy.select(cols).where(f)
ep = session.execute(s).fetchone()
return json.loads(ep.extra)
for service_id, exp_extra, msg in services:
extra = fetch_service(service_id)
self.assertEqual(exp_extra, extra, msg)
def test_upgrade_endpoint_enabled_cols(self):
"""Migration 42 added `enabled` column to `endpoint` table."""
self.upgrade(42)
# Verify that there's an 'enabled' field.
exp_cols = ['id', 'legacy_endpoint_id', 'interface', 'region',
'service_id', 'url', 'extra', 'enabled']
self.assertTableColumns('endpoint', exp_cols)

    def test_downgrade_endpoint_enabled_cols(self):
        """Check the columns after downgrading from migration 42 to 41.

        The downgrade from migration 42 removes the `enabled` column from
        the `endpoint` table.

        """

self.upgrade(42)
self.downgrade(41)
exp_cols = ['id', 'legacy_endpoint_id', 'interface', 'region',
'service_id', 'url', 'extra']
self.assertTableColumns('endpoint', exp_cols)

    def test_upgrade_endpoint_enabled_data(self):
        """Migration 42 has to migrate data from `extra` to `enabled`."""

def add_service():
service_id = uuid.uuid4().hex
service = {
'id': service_id,
'type': uuid.uuid4().hex
}
self.insert_dict(session, 'service', service)
return service_id

        def add_endpoint(service_id, **extra_data):
endpoint_id = uuid.uuid4().hex
endpoint = {
'id': endpoint_id,
'interface': uuid.uuid4().hex[:8],
'service_id': service_id,
'url': uuid.uuid4().hex,
'extra': json.dumps(extra_data)
}
self.insert_dict(session, 'endpoint', endpoint)
return endpoint_id
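
        # Start from the pre-migration (version 41) schema, where `enabled`
        # only lives inside the `extra` JSON blob.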
self.upgrade(41)
session = self.Session()
# Insert some endpoints using the old format where `enabled` is in
# `extra` JSON.
# We'll need a service entry since it's the foreign key for endpoints.
service_id = add_service()
new_ep = lambda **extra_data: add_endpoint(service_id, **extra_data)
# Different endpoints with expected enabled and extra values, and a
# description.
random_attr_name = uuid.uuid4().hex
random_attr_value = uuid.uuid4().hex
random_attr = {random_attr_name: random_attr_value}
random_attr_str = "%s='%s'" % (random_attr_name, random_attr_value)
random_attr_enabled_false = {random_attr_name: random_attr_value,
'enabled': False}
random_attr_enabled_false_str = 'enabled=False,%s' % random_attr_str
endpoints = [
# Some values for True.
(new_ep(), (True, {}), 'no enabled'),
(new_ep(enabled=True), (True, {}), 'enabled=True'),
(new_ep(enabled='true'), (True, {}), "enabled='true'"),
(new_ep(**random_attr),
(True, random_attr), random_attr_str),
(new_ep(enabled=None), (True, {}), 'enabled=None'),
# Some values for False.
(new_ep(enabled=False), (False, {}), 'enabled=False'),
(new_ep(enabled='false'), (False, {}), "enabled='false'"),
(new_ep(enabled='0'), (False, {}), "enabled='0'"),
(new_ep(**random_attr_enabled_false),
(False, random_attr), random_attr_enabled_false_str),
]
session.close()
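        # Migration 42 should move `enabled` out of `extra` and into the
        # new column, mirroring the service migration exercised above.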
self.upgrade(42)
session = self.Session()
# Verify that the endpoints have the expected values.
self.metadata.clear()
endpoint_table = sqlalchemy.Table('endpoint', self.metadata,
autoload=True)
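
        # Helper to read back the migrated `enabled` and `extra` values for
        # a single endpoint row.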
def fetch_endpoint(endpoint_id):
cols = [endpoint_table.c.enabled, endpoint_table.c.extra]
f = endpoint_table.c.id == endpoint_id
s = sqlalchemy.select(cols).where(f)
ep = session.execute(s).fetchone()
return ep.enabled, json.loads(ep.extra)
for endpoint_id, exp, msg in endpoints:
exp_enabled, exp_extra = exp
enabled, extra = fetch_endpoint(endpoint_id)
            # NOTE(henry-nash): Different databases may return enabled as a
            # real boolean or as 0/1, so we use assertEqual rather than
            # assertIs here.
self.assertEqual(exp_enabled, enabled, msg)
self.assertEqual(exp_extra, extra, msg)

    def test_downgrade_endpoint_enabled_data(self):
        """Downgrade from migration 42 migrates data.

        Downgrade from migration 42 moves data from the `enabled` column
        back into `extra`; any disabled endpoint gets 'enabled': False
        written into 'extra'.

        """

def add_service():
service_id = uuid.uuid4().hex
service = {
'id': service_id,
'type': uuid.uuid4().hex
}
self.insert_dict(session, 'service', service)
return service_id

        def add_endpoint(service_id, enabled, **extra_data):
endpoint_id = uuid.uuid4().hex
endpoint = {
'id': endpoint_id,
'interface': uuid.uuid4().hex[:8],
'service_id': service_id,
'url': uuid.uuid4().hex,
'extra': json.dumps(extra_data),
'enabled': enabled
}
self.insert_dict(session, 'endpoint', endpoint)
return endpoint_id
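
        # Start from the post-migration (version 42) schema, where endpoints
        # have a real `enabled` column.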
self.upgrade(42)
session = self.Session()
# Insert some endpoints using the new format.
# We'll need a service entry since it's the foreign key for endpoints.
service_id = add_service()
new_ep = (lambda enabled, **extra_data:
add_endpoint(service_id, enabled, **extra_data))
# Different endpoints with expected extra values, and a
# description.
endpoints = [
# True tests
(new_ep(True), {}, 'enabled'),
(new_ep(True, something='whatever'), {'something': 'whatever'},
"something='whatever'"),
# False tests
(new_ep(False), {'enabled': False}, 'enabled=False'),
(new_ep(False, something='whatever'),
{'enabled': False, 'something': 'whatever'},
"enabled=False, something='whatever'"),
]
session.close()
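        # Downgrading to 41 should fold `enabled` back into `extra`, but
        # only for disabled endpoints.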
self.downgrade(41)
session = self.Session()
# Verify that the endpoints have the expected values.
self.metadata.clear()
endpoint_table = sqlalchemy.Table('endpoint', self.metadata,
autoload=True)
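
        # Helper to read back the `extra` blob for a single endpoint row.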
def fetch_endpoint(endpoint_id):
cols = [endpoint_table.c.extra]
f = endpoint_table.c.id == endpoint_id
s = sqlalchemy.select(cols).where(f)
ep = session.execute(s).fetchone()
return json.loads(ep.extra)
for endpoint_id, exp_extra, msg in endpoints:
extra = fetch_endpoint(endpoint_id)
self.assertEqual(exp_extra, extra, msg)

    def test_upgrade_region_non_unique_description(self):
        """Test upgrade to migration 43.

        After this migration there is no unique constraint on the region
        description column, so two regions can be created with the same
        description.

        """

def add_region():
region_uuid = uuid.uuid4().hex
region = {
'id': region_uuid,
'description': ''
}
self.insert_dict(session, 'region', region)
return region_uuid

        self.upgrade(43)
session = self.Session()
# Write one region to the database
add_region()
# Write another region to the database with the same description
add_region()

    def test_upgrade_region_unique_description(self):
        """Test upgrade to migration 43.

        This test models an upgrade from a schema that still has a unique
        constraint on the region description column, and verifies that the
        constraint is dropped so two regions can share a description.

        """

def add_region(table):
region_uuid = uuid.uuid4().hex
region = {
'id': region_uuid,
'description': ''
}
self.insert_dict(session, 'region', region, table=table)
return region_uuid
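
        # Use a fresh MetaData instance for each reflection so the table
        # definition is re-read from the database rather than served from
        # a cached copy.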
def get_metadata():
meta = sqlalchemy.MetaData()
meta.bind = self.engine
return meta

        # Migrate to version 42
self.upgrade(42)
session = self.Session()
region_table = sqlalchemy.Table('region',
get_metadata(),
autoload=True)
        # Create the unique constraint and reload the table so the
        # reflected metadata picks it up.
idx = sqlalchemy.Index('description', region_table.c.description,
unique=True)
idx.create(self.engine)
region_unique_table = sqlalchemy.Table('region',
get_metadata(),
autoload=True)
add_region(region_unique_table)
self.assertEqual(1, session.query(region_unique_table).count())
        # Verify that the unique constraint is enforced.
        self.assertRaises(
            # FIXME(I159): Since oslo_db wraps all the database exceptions
            # in more specific exception objects, we should catch both the
            # sqlalchemy and the oslo_db exceptions. If an old oslo_db
            # version is installed, IntegrityError is raised; with
            # oslo_db >= 0.4.0, DBError is raised. Once global requirements
            # picks up a version that fixes the exception wrapping,
            # IntegrityError must be removed from this tuple.
            # NOTE(henry-nash): The above re-creation of the (now erased
            # from history) unique constraint doesn't appear to work well
            # with the PostgreSQL SQLAlchemy driver, which can raise a
            # ValueError instead, so we also catch that here.
            (sqlalchemy.exc.IntegrityError, db_exception.DBError, ValueError),
            add_region,
            table=region_unique_table)
        # Migrate to 43; the unique constraint should be dropped.
session.close()
self.upgrade(43)
session = self.Session()
        # Reload the region table from the schema.
region_nonunique = sqlalchemy.Table('region',
get_metadata(),
autoload=True)
self.assertEqual(1, session.query(region_nonunique).count())
# Write a second region to the database with the same description
add_region(region_nonunique)
self.assertEqual(2, session.query(region_nonunique).count())

    def test_id_mapping(self):
self.upgrade(50)
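        # The id_mapping table is expected to be introduced by a later
        # migration, so it must not exist yet at version 50.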
self.assertTableDoesNotExist('id_mapping')