sql: Remove dead code

Change I41584c652ab34a267009136ee2a2e159ee2f2a6e ("sql: Squash mitaka
migrations") removed the final users of the 'get_constraints_names',
'add_constraints' and 'remove_constraints' functions. Change
I96cab42cfcfd3e86b53f25abf4cf4043af3b5667 ("sql: Squash ocata migrations")
removed the final use of the 'USE_TRIGGERS' constant. Change
I59882d88fe593ec1ae37415b2157584f7f3c85f8 ("sql: Remove legacy
'migrate_repo' migration repo") removed the final use of the
'_assert_not_schema_downgrade' function.

Change-Id: I8aa811ea336e9f613300bc21125e7582010cf5a5
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
commit 15847926ef
parent 9f42c5ad6a
@@ -17,17 +17,12 @@
 import os
 
 import migrate
-from migrate import exceptions
 from migrate.versioning import api as versioning_api
 from oslo_db import exception as db_exception
 from oslo_db.sqlalchemy import migration
-import sqlalchemy
 
 from keystone.common import sql
 from keystone import exception
-from keystone.i18n import _
-
-USE_TRIGGERS = True
 
 INITIAL_VERSION = 72
 EXPAND_REPO = 'expand_repo'
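Note: the module being trimmed here appears to be keystone's common/sql/upgrades.py. USE_TRIGGERS, per the comment removed further down, let the offline path bypass trigger setup and teardown for non-rolling upgrades; the ocata squash dropped its last consumer. A minimal, hypothetical sketch of the pattern a rolling-upgrade migration script might have followed (the import path, trigger SQL and function body are illustrative assumptions, not keystone code):

    # Hypothetical sqlalchemy-migrate script -- illustrates the old pattern only.
    from keystone.common.sql import upgrades  # assumed import location

    # Placeholder: the real data-sync trigger DDL was dialect-specific.
    _SYNC_TRIGGER_SQL = "CREATE TRIGGER ..."


    def upgrade(migrate_engine):
        if not upgrades.USE_TRIGGERS:
            # Offline (non-rolling) upgrades set the flag to False so the
            # trigger setup/teardown work was skipped entirely.
            return
        migrate_engine.execute(_SYNC_TRIGGER_SQL)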
@@ -71,58 +66,6 @@ class Repository(object):
                 session.get_bind(), self.repo_path, self.min_version)
 
 
-# Different RDBMSs use different schemes for naming the Foreign Key
-# Constraints. SQLAlchemy does not yet attempt to determine the name
-# for the constraint, and instead attempts to deduce it from the column.
-# This fails on MySQL.
-def get_constraints_names(table, column_name):
-    fkeys = [fk.name for fk in table.constraints
-             if (isinstance(fk, sqlalchemy.ForeignKeyConstraint) and
-                 column_name in fk.columns)]
-    return fkeys
-
-
-# remove_constraints and add_constraints both accept a list of dictionaries
-# that contain:
-# {'table': a sqlalchemy table. The constraint is added to dropped from
-# this table.
-# 'fk_column': the name of a column on the above table, The constraint
-# is added to or dropped from this column
-# 'ref_column':a sqlalchemy column object. This is the reference column
-# for the constraint.
-def remove_constraints(constraints):
-    for constraint_def in constraints:
-        constraint_names = get_constraints_names(constraint_def['table'],
-                                                 constraint_def['fk_column'])
-        for constraint_name in constraint_names:
-            migrate.ForeignKeyConstraint(
-                columns=[getattr(constraint_def['table'].c,
-                                 constraint_def['fk_column'])],
-                refcolumns=[constraint_def['ref_column']],
-                name=constraint_name).drop()
-
-
-def add_constraints(constraints):
-    for constraint_def in constraints:
-
-        if constraint_def['table'].kwargs.get('mysql_engine') == 'MyISAM':
-            # Don't try to create constraint when using MyISAM because it's
-            # not supported.
-            continue
-
-        ref_col = constraint_def['ref_column']
-        ref_engine = ref_col.table.kwargs.get('mysql_engine')
-        if ref_engine == 'MyISAM':
-            # Don't try to create constraint when using MyISAM because it's
-            # not supported.
-            continue
-
-        migrate.ForeignKeyConstraint(
-            columns=[getattr(constraint_def['table'].c,
-                             constraint_def['fk_column'])],
-            refcolumns=[constraint_def['ref_column']]).create()
-
-
 def find_repo(repo_name):
     """Return the absolute path to the named repository."""
     path = os.path.abspath(os.path.join(
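The removed comment block documents the dictionary format both helpers accepted. As a purely illustrative sketch, a pre-mitaka migration script could have wrapped a column change with them like this (the table and column names, and the import location of the helpers, are assumptions made for the example):

    import sqlalchemy as sql

    from keystone.common.sql import upgrades  # assumed home of the helpers


    def upgrade(migrate_engine):
        meta = sql.MetaData()
        meta.bind = migrate_engine

        user = sql.Table('user', meta, autoload=True)
        project = sql.Table('project', meta, autoload=True)

        constraints = [{
            'table': user,                      # constraint lives on 'user'
            'fk_column': 'default_project_id',  # FK column on 'user'
            'ref_column': project.c.id,         # referenced column
        }]

        # Drop the FK under whatever name the backend generated for it,
        # perform the column change, then recreate it (skipped on MyISAM).
        upgrades.remove_constraints(constraints)
        # ... alter the 'default_project_id' column here ...
        upgrades.add_constraints(constraints)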
@@ -146,19 +89,6 @@ def _sync_repo(repo_name):
     )
 
 
-def _assert_not_schema_downgrade(version=None):
-    if version is not None:
-        try:
-            current_ver = int(str(get_db_version()))
-            if int(version) < current_ver:
-                raise migration.exception.DBMigrationError(
-                    _("Unable to downgrade schema"))
-        except exceptions.DatabaseNotControlledError:  # nosec
-            # NOTE(morganfainberg): The database is not controlled, this action
-            # cannot be a downgrade.
-            pass
-
-
 def offline_sync_database_to_version(version=None):
     """Perform and off-line sync of the database.
 
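For context, this guard was only reachable from the legacy 'migrate_repo' sync path removed by change I59882d88fe593ec1ae37415b2157584f7f3c85f8, which is why it is now dead. A rough, hypothetical sketch of that old call shape (the helper name and arguments are assumptions; oslo_db's migration.db_sync is the only real API used):

    def _sync_legacy_repo(engine, version=None):  # hypothetical helper name
        _assert_not_schema_downgrade(version)     # refuse to move backwards
        abs_path = find_repo('migrate_repo')      # the legacy repo
        migration.db_sync(engine, abs_path, version=version,
                          init_version=INITIAL_VERSION)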
@@ -170,17 +100,7 @@ def offline_sync_database_to_version(version=None):
     version. Downgrading is not supported. If version is specified, then only
     the main database migration is carried out - and the expand, migration and
     contract phases will NOT be run.
-
     """
-    global USE_TRIGGERS
-
-    # This flags let's us bypass trigger setup & teardown for non-rolling
-    # upgrades. We set this as a global variable immediately before handing off
-    # to sqlalchemy-migrate, because we can't pass arguments directly to
-    # migrations that depend on it. We could also register this as a CONF
-    # option, but the idea here is that we aren't exposing a new API.
-    USE_TRIGGERS = False
-
     if version:
         raise Exception('Specifying a version is no longer supported')
 
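With the trigger flag gone, the offline path reduces to rejecting explicit versions and running the three phases in order. A condensed reading of the resulting function, reconstructed from the surrounding hunks rather than copied verbatim:

    def offline_sync_database_to_version(version=None):
        if version:
            raise Exception('Specifying a version is no longer supported')
        expand_schema()      # additive schema changes
        migrate_data()       # move data to the new layout
        contract_schema()    # drop what the old schema needed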
@@ -241,7 +161,6 @@ def expand_schema():
 
     This is run manually by the keystone-manage command before the first
     keystone node is migrated to the latest release.
-
     """
     validate_upgrade_order(EXPAND_REPO)
     _sync_repo(repo_name=EXPAND_REPO)
@@ -252,7 +171,6 @@ def migrate_data():
 
     This is run manually by the keystone-manage command once the keystone
     schema has been expanded for the new release.
-
     """
     validate_upgrade_order(DATA_MIGRATION_REPO)
     _sync_repo(repo_name=DATA_MIGRATION_REPO)
@@ -264,7 +182,6 @@ def contract_schema():
     This is run manually by the keystone-manage command once the keystone
     nodes have been upgraded to the latest release and will remove any old
     tables/columns that are no longer required.
-
     """
     validate_upgrade_order(CONTRACT_REPO)
     _sync_repo(repo_name=CONTRACT_REPO)