sql: Add support for auto-generation

Add the ability to autogenerate migrations. Because we need to support
different types of migration (expand and contract), this ends up being
significantly more complicated than what was needed in nova and cinder
and more akin to what was done in neutron. The key feature here is the
use of an alembic hook called 'process_revision_directives', which is
invoked whenever one runs 'alembic revision --autogenerate'. We extend
this to hook into the autogeneration process and ensure we only emit
directives for the relevant phase.
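
As an illustration, the hook is registered in the alembic 'env.py'
(changed below); a minimal sketch, assuming an open SQLAlchemy
'connection' and the model 'target_metadata' are already in scope:

    from alembic import context

    from keystone.common.sql.migrations import autogen

    # invoked on 'alembic revision --autogenerate'; rewrites the
    # autogenerated directives into per-phase migration scripts
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        process_revision_directives=autogen.process_revision_directives,
    )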

While we're here, we open up the Bobcat DB branch. This is similar to
what Neutron does (e.g. change I13ba740d245a46c41a969ff198e08ddff896eb1a).
Documentation will follow.

Change-Id: I17c9ff9508c5e2bd9521c18973af093d7550ab5a
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
Stephen Finucane 2022-01-21 18:29:37 +00:00 committed by Stephen Finucane
parent 5e9f32469e
commit 1bcf8cee0d
8 changed files with 569 additions and 9 deletions

File: doc/source/conf.py

@@ -61,7 +61,10 @@ apidoc_excluded_paths = [
# TODO(gmann): with new release of SQLAlchemy(1.4.27) TypeDecorator used
# in common/sql/core.py file started failing. Remove this once the issue of
# TypeDecorator is fixed.
-'common/sql/core.py']
+'common/sql/core.py',
+'common/sql/migrations/*',
+'common/sql/migrations',
+]
apidoc_separate_modules = True
# sphinxcontrib.seqdiag options
@@ -88,7 +91,7 @@ source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
-copyright = '2012, OpenStack Foundation'
+copyright = '2012-Present, OpenInfra Foundation'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.

File: keystone/common/sql/migrations/autogen.py (new file)

@@ -0,0 +1,131 @@
# Copyright (c) 2015 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from alembic.operations import ops
from alembic.util import Dispatcher
from alembic.util import rev_id as new_rev_id
from keystone.common.sql import upgrades
from keystone.i18n import _
_ec_dispatcher = Dispatcher()
def process_revision_directives(context, revision, directives):
directives[:] = list(_assign_directives(context, directives))
def _assign_directives(context, directives, phase=None):
for directive in directives:
decider = _ec_dispatcher.dispatch(directive)
if phase is None:
phases = upgrades.MIGRATION_BRANCHES
else:
phases = (phase,)
for phase in phases:
decided = decider(context, directive, phase)
if decided:
yield decided
@_ec_dispatcher.dispatch_for(ops.MigrationScript)
def _migration_script_ops(context, directive, phase):
"""Generate a new ops.MigrationScript() for a given phase.
E.g. given an ops.MigrationScript() directive from a vanilla autogenerate
and an expand/contract phase name, produce a new ops.MigrationScript()
which contains only those sub-directives appropriate to "expand" or
"contract". Also ensure that the branch directory exists and that
the correct branch labels/depends_on/head revision are set up.
"""
autogen_kwargs = {}
version_path = upgrades.get_version_branch_path(
release=upgrades.CURRENT_RELEASE,
branch=phase,
)
upgrades.check_bootstrap_new_branch(phase, version_path, autogen_kwargs)
op = ops.MigrationScript(
new_rev_id(),
ops.UpgradeOps(
ops=list(
_assign_directives(context, directive.upgrade_ops.ops, phase)
)
),
ops.DowngradeOps(ops=[]),
message=directive.message,
**autogen_kwargs
)
if not op.upgrade_ops.is_empty():
return op
@_ec_dispatcher.dispatch_for(ops.AddConstraintOp)
@_ec_dispatcher.dispatch_for(ops.CreateIndexOp)
@_ec_dispatcher.dispatch_for(ops.CreateTableOp)
@_ec_dispatcher.dispatch_for(ops.AddColumnOp)
def _expands(context, directive, phase):
if phase == 'expand':
return directive
else:
return None
@_ec_dispatcher.dispatch_for(ops.DropConstraintOp)
@_ec_dispatcher.dispatch_for(ops.DropIndexOp)
@_ec_dispatcher.dispatch_for(ops.DropTableOp)
@_ec_dispatcher.dispatch_for(ops.DropColumnOp)
def _contracts(context, directive, phase):
if phase == 'contract':
return directive
else:
return None
@_ec_dispatcher.dispatch_for(ops.AlterColumnOp)
def _alter_column(context, directive, phase):
is_expand = phase == 'expand'
if is_expand and directive.modify_nullable is True:
return directive
elif not is_expand and directive.modify_nullable is False:
return directive
else:
# TODO(stephenfin): This logic is taken from neutron but I don't think
# it's correct. As-is, this prevents us from auto-generating migrations
# that change the nullable value of a field since the modify_nullable
# value will be either True or False and we run through both expand and
# contract phases so it'll fail one of the above checks. However,
# setting nullable=True is clearly an expand operation (it makes the
# database more permissive) and the opposite is also true. As such,
# shouldn't we simply emit the directive if we're in the relevant phase
# and skip otherwise? This is only left because zzzeek wrote that
# neutron code and I'm sure he had good reason for this.
msg = _(
"Don't know if operation is an expand or contract at the moment: "
"%s"
)
raise NotImplementedError(msg % directive)
@_ec_dispatcher.dispatch_for(ops.ModifyTableOps)
def _modify_table_ops(context, directive, phase):
op = ops.ModifyTableOps(
directive.table_name,
ops=list(_assign_directives(context, directive.ops, phase)),
schema=directive.schema,
)
if not op.is_empty():
return op
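
As a rough illustration of the dispatcher above, an autogenerated script
mixing additive and destructive operations is split into one script per
phase. A minimal sketch, assuming an installed keystone tree (note that
_migration_script_ops also bootstraps the branch directory on disk):

    import sqlalchemy as sa
    from alembic.operations import ops

    from keystone.common.sql.migrations import autogen

    directives = [
        ops.MigrationScript(
            'example',
            ops.UpgradeOps(ops=[
                # additive change: should land in the 'expand' script
                ops.AddColumnOp('user', sa.Column('nickname', sa.String(64))),
                # destructive change: should land in the 'contract' script
                ops.DropColumnOp('user', 'obsolete'),
            ]),
            ops.DowngradeOps(ops=[]),
            message='example change',
        ),
    ]
    autogen.process_revision_directives(None, None, directives)
    # directives now holds one script per non-empty phase, each carrying
    # only the operations appropriate to 'expand' or 'contract'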

File: keystone/common/sql/migrations/env.py

@@ -16,20 +16,144 @@ from alembic import context
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from keystone.common.sql import core
from keystone.common.sql.migrations import autogen
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
-# Interpret the config file for Python logging unless we're told not to.
-# This line sets up loggers basically.
+# interpret the config file for Python logging unless we're told not to;
+# this line sets up loggers basically.
if config.attributes.get('configure_logger', True):
fileConfig(config.config_file_name)
-# add your model's MetaData object here
-# for 'autogenerate' support
-# from myapp import mymodel
-# target_metadata = mymodel.Base.metadata
-target_metadata = None
+# keystone model MetaData object
+target_metadata = core.ModelBase.metadata
def include_object(object, name, type_, reflected, compare_to):
BORKED_COLUMNS = (
# nullable values are incorrect
('credential', 'encrypted_blob'),
('credential', 'key_hash'),
('federated_user', 'user_id'),
('federated_user', 'idp_id'),
('local_user', 'user_id'),
('nonlocal_user', 'user_id'),
('password', 'local_user_id'),
# default values are incorrect
('password', 'created_at_int'),
('password', 'self_service'),
('project', 'is_domain'),
('service_provider', 'relay_state_prefix'),
)
BORKED_UNIQUE_CONSTRAINTS = (
# removed constraints
('project_tag', ['project_id', 'name']),
(
'trust',
[
'trustor_user_id',
'trustee_user_id',
'project_id',
'impersonation',
'expires_at',
],
),
# added constraints
('access_rule', ['external_id']),
(
'trust',
[
'trustor_user_id',
'trustee_user_id',
'project_id',
'impersonation',
'expires_at',
'expires_at_int',
],
),
)
BORKED_FK_CONSTRAINTS = (
# removed fks
('application_credential_access_rule', ['access_rule_id']),
('limit', ['registered_limit_id']),
('registered_limit', ['service_id']),
('registered_limit', ['region_id']),
('endpoint', ['region_id']),
# added fks
('application_credential_access_rule', ['access_rule_id']),
('endpoint', ['region_id']),
('assignment', ['role_id']),
)
BORKED_INDEXES = (
# removed indexes
('access_rule', ['external_id']),
('access_rule', ['user_id']),
('revocation_event', ['revoked_at']),
('system_assignment', ['actor_id']),
('user', ['default_project_id']),
# added indexes
('access_rule', ['external_id']),
('access_rule', ['user_id']),
('access_token', ['consumer_id']),
('endpoint', ['service_id']),
('revocation_event', ['revoked_at']),
('user', ['default_project_id']),
('user_group_membership', ['group_id']),
(
'trust',
[
'trustor_user_id',
'trustee_user_id',
'project_id',
'impersonation',
'expires_at',
'expires_at_int',
],
),
)
# NOTE(stephenfin): By skipping these items, we skip *all* changes to the
# affected item. However, we only want to skip the actual things we know
# about until we have enough time to fix them. These issues are listed in
# keystone.tests.unit.common.sql.test_upgrades.KeystoneModelsMigrationsSync.
# However, this isn't a huge issue since that test is more specific and will
# catch other issues, and anyone making changes to these columns and hoping
# to autogenerate them would need to fix the latent issues first anyway.
if type_ == 'column':
return (object.table.name, name) not in BORKED_COLUMNS
if type_ == 'unique_constraint':
columns = [c.name for c in object.columns]
return (object.table.name, columns) not in BORKED_UNIQUE_CONSTRAINTS
if type_ == 'foreign_key_constraint':
columns = [c.name for c in object.columns]
return (object.table.name, columns) not in BORKED_FK_CONSTRAINTS
if type_ == 'index':
columns = [c.name for c in object.columns]
return (object.table.name, columns) not in BORKED_INDEXES
return True
def include_name(name, type_, parent_names):
"""Determine which tables or columns to skip.
This is used where we have migrations that are out-of-sync with the models.
"""
REMOVED_TABLES = ('token',)
if type_ == 'table':
return name not in REMOVED_TABLES
return True
def run_migrations_offline():
@@ -45,6 +169,9 @@ def run_migrations_offline():
context.configure(
url=url,
target_metadata=target_metadata,
render_as_batch=True,
include_name=include_name,
include_object=include_object,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
@@ -58,6 +185,12 @@ def run_migrations_online():
In this scenario we need to create an Engine and associate a connection
with the context.
This is modified from the default based on the below, since we want to
share an engine when unit testing so in-memory database testing actually
works.
https://alembic.sqlalchemy.org/en/latest/cookbook.html#connection-sharing
"""
connectable = config.attributes.get('connection', None)
@@ -77,6 +210,9 @@ def run_migrations_online():
connection=connection,
target_metadata=target_metadata,
render_as_batch=True,
include_name=include_name,
include_object=include_object,
process_revision_directives=autogen.process_revision_directives,
)
with context.begin_transaction():
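
The 'connection' attribute enables the cookbook pattern referenced
above; a minimal sketch of how a test might reuse a single in-memory
engine (the 'alembic.ini' path is illustrative):

    import sqlalchemy as sa
    from alembic import command
    from alembic.config import Config

    engine = sa.create_engine('sqlite://')
    with engine.connect() as connection:
        cfg = Config('alembic.ini')
        # picked up by run_migrations_online() instead of creating a
        # new engine, so the in-memory database is shared with the test
        cfg.attributes['connection'] = connection
        command.upgrade(cfg, 'heads')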

File: migration CLI script (new file)

@@ -0,0 +1,258 @@
#!/usr/bin/env python3
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from alembic import command as alembic_command
from alembic import script as alembic_script
from alembic import util as alembic_util
from oslo_config import cfg
from oslo_log import log
import pbr.version
from keystone.common import sql
from keystone.common.sql import upgrades
import keystone.conf
from keystone.i18n import _
# We need to import all of these so the tables are registered. It would be
# easier if these were all in a central location :(
import keystone.application_credential.backends.sql # noqa: F401
import keystone.assignment.backends.sql # noqa: F401
import keystone.assignment.role_backends.sql_model # noqa: F401
import keystone.catalog.backends.sql # noqa: F401
import keystone.credential.backends.sql # noqa: F401
import keystone.endpoint_policy.backends.sql # noqa: F401
import keystone.federation.backends.sql # noqa: F401
import keystone.identity.backends.sql_model # noqa: F401
import keystone.identity.mapping_backends.sql # noqa: F401
import keystone.limit.backends.sql # noqa: F401
import keystone.oauth1.backends.sql # noqa: F401
import keystone.policy.backends.sql # noqa: F401
import keystone.resource.backends.sql_model # noqa: F401
import keystone.resource.config_backends.sql # noqa: F401
import keystone.revoke.backends.sql # noqa: F401
import keystone.trust.backends.sql # noqa: F401
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
def do_alembic_command(config, cmd, revision=None, **kwargs):
args = []
if revision:
args.append(revision)
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(str(e))
def do_generic_show(config, cmd):
kwargs = {'verbose': CONF.command.verbose}
do_alembic_command(config, cmd, **kwargs)
def do_validate(config, cmd):
do_alembic_command(config, 'branches')
# TODO(stephenfin): Implement these
# validate_revisions(config)
# TODO(stephenfin): Implement these
# validate_head_files(config)
def _find_milestone_revisions(config, milestone, branch=None):
"""Return the revision(s) for a given milestone."""
script = alembic_script.ScriptDirectory.from_config(config)
return [
(m.revision, label)
for m in _get_revisions(script)
for label in (m.branch_labels or [None])
if milestone in getattr(m.module, 'keystone_milestone', [])
and (branch is None or branch in m.branch_labels)
]
def _get_revisions(script):
return list(script.walk_revisions(base='base', head='heads'))
def do_upgrade(config, cmd):
branch = None
if (CONF.command.revision or CONF.command.delta) and (
CONF.command.expand or CONF.command.contract
):
msg = _('Phase upgrade options do not accept revision specification')
raise SystemExit(msg)
if CONF.command.expand:
branch = upgrades.EXPAND_BRANCH
revision = f'{upgrades.EXPAND_BRANCH}@head'
elif CONF.command.contract:
branch = upgrades.CONTRACT_BRANCH
revision = f'{upgrades.CONTRACT_BRANCH}@head'
elif not CONF.command.revision and not CONF.command.delta:
msg = _('You must provide a revision or relative delta')
raise SystemExit(msg)
else:
revision = CONF.command.revision or ''
if '-' in revision:
msg = _('Negative relative revision (downgrade) not supported')
raise SystemExit(msg)
delta = CONF.command.delta
if delta:
if '+' in revision:
msg = _('Use either --delta or relative revision, not both')
raise SystemExit(msg)
if delta < 0:
msg = _('Negative delta (downgrade) not supported')
raise SystemExit(msg)
revision = '%s+%d' % (revision, delta)
# leave branchless 'head' revision request backward compatible by
# applying all heads in all available branches.
if revision == 'head':
revision = 'heads'
if revision in upgrades.MILESTONES:
expand_revisions = _find_milestone_revisions(
config,
revision,
upgrades.EXPAND_BRANCH,
)
contract_revisions = _find_milestone_revisions(
config,
revision,
upgrades.CONTRACT_BRANCH,
)
# Expand revisions must be run before contract revisions
revisions = expand_revisions + contract_revisions
else:
revisions = [(revision, branch)]
for revision, branch in revisions:
# if not CONF.command.sql:
# run_sanity_checks(config, revision)
do_alembic_command(
config,
cmd,
revision=revision,
sql=CONF.command.sql,
)
def do_revision(config, cmd):
kwargs = {
'message': CONF.command.message,
'autogenerate': CONF.command.autogenerate,
'sql': CONF.command.sql,
}
branches = []
if CONF.command.expand:
kwargs['head'] = 'expand@head'
branches.append(upgrades.EXPAND_BRANCH)
elif CONF.command.contract:
kwargs['head'] = 'contract@head'
branches.append(upgrades.CONTRACT_BRANCH)
else:
branches = upgrades.MIGRATION_BRANCHES
if not CONF.command.autogenerate:
for branch in branches:
args = copy.copy(kwargs)
version_path = upgrades.get_version_branch_path(
release=upgrades.CURRENT_RELEASE,
branch=branch,
)
upgrades.check_bootstrap_new_branch(branch, version_path, args)
do_alembic_command(config, cmd, **args)
else: # CONF.command.autogenerate
# autogeneration code will take care of enforcing proper directories
do_alembic_command(config, cmd, **kwargs)
# TODO(stephenfin): Implement these
# update_head_files(config)
def add_branch_options(parser):
group = parser.add_mutually_exclusive_group()
group.add_argument('--expand', action='store_true')
group.add_argument('--contract', action='store_true')
return group
def add_alembic_subparser(sub, cmd):
return sub.add_parser(cmd, help=getattr(alembic_command, cmd).__doc__)
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches', 'heads']:
parser = add_alembic_subparser(subparsers, name)
parser.set_defaults(func=do_generic_show)
parser.add_argument(
'--verbose',
action='store_true',
help='Display more verbose output for the specified command',
)
parser = add_alembic_subparser(subparsers, 'upgrade')
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
add_branch_options(parser)
parser.set_defaults(func=do_upgrade)
parser = subparsers.add_parser(
'validate',
help=alembic_command.branches.__doc__ + ' and validate head file',
)
parser.set_defaults(func=do_validate)
parser = add_alembic_subparser(subparsers, 'revision')
parser.add_argument('-m', '--message')
parser.add_argument('--sql', action='store_true')
group = add_branch_options(parser)
group.add_argument('--autogenerate', action='store_true')
parser.set_defaults(func=do_revision)
command_opt = cfg.SubCommandOpt(
'command',
title='Command',
help=_('Available commands'),
handler=add_command_parsers,
)
def main():
CONF.register_cli_opt(command_opt)
keystone.conf.configure()
sql.initialize()
CONF(
project='keystone',
version=pbr.version.VersionInfo('keystone').version_string(),
)
config = upgrades.get_alembic_config()
return bool(CONF.command.func(config, CONF.command.name))
if __name__ == '__main__':
main()
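
As a rough usage sketch, the equivalent of running this tool with
'upgrade --expand' followed by 'upgrade --contract' can be driven
programmatically. The module path of the CLI is not shown in this
diff, so the 'manage' import below is hypothetical:

    from keystone.common.sql import upgrades
    from keystone.common.sql.migrations import manage  # hypothetical path

    config = upgrades.get_alembic_config()
    # expand migrations are safe to apply while old code is still running...
    manage.do_alembic_command(config, 'upgrade', revision='expand@head')
    # ...contract migrations only once no old code remains
    manage.do_alembic_command(config, 'upgrade', revision='contract@head')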

File: yoga contract migration script (revision e25ffa003242)

@@ -21,6 +21,8 @@ Create Date: 2022-01-21 00:00:00.000000
revision = 'e25ffa003242'
down_revision = '27e647c0fad4'
branch_labels = ('contract',)
# milestone identifier
keystone_milestone = ['yoga']
def upgrade():

File: yoga expand migration script (revision 29e87d24a316)

@@ -21,6 +21,8 @@ Create Date: 2022-01-21 00:00:00.000000
revision = '29e87d24a316'
down_revision = '27e647c0fad4'
branch_labels = ('expand',)
# milestone identifier
keystone_milestone = ['yoga']
def upgrade():
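
The 'keystone_milestone' markers above are what the milestone handling
in do_upgrade() keys off. A minimal sketch of resolving a milestone to
its revisions, again treating the 'manage' module path as hypothetical:

    from keystone.common.sql import upgrades
    from keystone.common.sql.migrations import manage  # hypothetical path

    config = upgrades.get_alembic_config()
    # expand revisions must be applied before contract revisions
    revisions = manage._find_milestone_revisions(
        config, 'yoga', upgrades.EXPAND_BRANCH,
    ) + manage._find_milestone_revisions(
        config, 'yoga', upgrades.CONTRACT_BRANCH,
    )
    for revision, branch in revisions:
        print(branch, revision)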

File: keystone/common/sql/upgrades.py

@@ -22,6 +22,7 @@ from alembic import migration as alembic_migration
from alembic import script as alembic_script
from oslo_db import exception as db_exception
from oslo_log import log as logging
from oslo_utils import fileutils
from keystone.common import sql
import keystone.conf
@@ -37,7 +38,13 @@ CONTRACT_BRANCH = 'contract'
RELEASES = (
'yoga',
'bobcat',
)
MILESTONES = (
'yoga',
# Do not add the milestone until the end of the release
)
CURRENT_RELEASE = RELEASES[-1]
MIGRATION_BRANCHES = (EXPAND_BRANCH, CONTRACT_BRANCH)
VERSIONS_PATH = os.path.join(
os.path.dirname(sql.__file__),
@@ -46,6 +53,23 @@ VERSIONS_PATH = os.path.join(
)
def get_version_branch_path(release=None, branch=None):
"""Get the path to a version branch."""
version_path = VERSIONS_PATH
if branch and release:
return os.path.join(version_path, release, branch)
return version_path
def check_bootstrap_new_branch(branch, version_path, addn_kwargs):
"""Bootstrap a new migration branch if it does not exist."""
addn_kwargs['version_path'] = version_path
addn_kwargs['head'] = f'{branch}@head'
if not os.path.exists(version_path):
# Bootstrap initial directory structure
fileutils.ensure_tree(version_path, mode=0o755)
def _find_alembic_conf():
"""Get the project's alembic configuration.
@@ -75,6 +99,10 @@ def _find_alembic_conf():
return config
def get_alembic_config():
return _find_alembic_conf()
def _get_current_heads(engine, config):
script = alembic_script.ScriptDirectory.from_config(config)