Merge "Remove downgrade functions"

Jenkins 2015-04-24 08:47:52 +00:00 committed by Gerrit Code Review
commit b47c4fc189
48 changed files with 0 additions and 957 deletions
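
Every change below has the same shape: the module's downgrade() function, plus any import used only by it, is deleted, so the sqlalchemy-migrate scripts under heat/db/sqlalchemy/migrate_repo become upgrade-only. As a rough sketch of what such a module looks like after this series (the table, columns, and bound-MetaData style are illustrative and match the SQLAlchemy versions Heat used at the time; this is not a copy of any file in the commit):

import sqlalchemy


def upgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    example = sqlalchemy.Table(
        'example', meta,
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column('name', sqlalchemy.String(255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    example.create()

# Note: no downgrade() is defined any more; schema changes are forward-only.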

View File

@@ -158,8 +158,3 @@ def upgrade(migrate_engine):
            # to the previously existing state.
            meta.drop_all(tables=tables[:index])
            raise


def downgrade(migrate_engine):
    raise NotImplementedError('Database downgrade not supported - '
                              'would drop all tables')

View File

@@ -20,14 +20,3 @@ def upgrade(migrate_engine):
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    stack.c.timeout.alter(nullable=True)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine

    stack = sqlalchemy.Table('stack', meta, autoload=True)
    # NOTE(viktors): We must be sure, that there are no nullable columns in
    #                `stack` table before we alter it.
    migrate_engine.execute('UPDATE stack set timeout=60 WHERE timeout IS NULL')
    stack.c.timeout.alter(nullable=False)
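
The hunk above also shows the reflect-and-alter idiom that recurs through the whole series: bind a MetaData to the running engine, reflect the live table with autoload=True, and call the alter()/create()/drop() helpers that sqlalchemy-migrate's changeset extension patches onto columns. A self-contained sketch of that idiom, assuming the 2015-era SQLAlchemy plus sqlalchemy-migrate pair (bound metadata and the changeset monkey patch do not exist in modern SQLAlchemy); an in-memory SQLite engine stands in for a real database:

import sqlalchemy
# Importing the migrate package applies the changeset monkey patch that
# gives Column objects create()/drop()/alter(); the migration runner
# normally does this for the scripts shown in this commit.
import migrate  # noqa: F401

engine = sqlalchemy.create_engine('sqlite://')
meta = sqlalchemy.MetaData(bind=engine)
sqlalchemy.Table(
    'stack', meta,
    sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
    sqlalchemy.Column('timeout', sqlalchemy.Integer, nullable=False)).create()

# The idiom used by the migrations: re-reflect the live table, alter in place.
meta = sqlalchemy.MetaData(bind=engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
stack.c.timeout.alter(nullable=True)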

View File

@@ -24,12 +24,3 @@ def upgrade(migrate_engine):
    # action is e.g "CREATE" and status is e.g "IN_PROGRESS"
    event.c.name.alter(name='resource_status')
    sqlalchemy.Column('resource_action', sqlalchemy.String(255)).create(event)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine

    event = sqlalchemy.Table('event', meta, autoload=True)
    event.c.resource_status.alter(name='name')
    event.c.resource_action.drop()

View File

@@ -23,15 +23,3 @@ def upgrade(migrate_engine):
    resource.c.id.alter(sqlalchemy.String(36), primary_key=True,
                        default=lambda: str(uuid.uuid4()))


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    resource = sqlalchemy.Table('resource', meta, autoload=True)

    try:
        resource.c.id.alter(sqlalchemy.Integer, primary_key=True)
    except Exception:
        # NOTE(sshturm): since there is no way to downgrade just passing
        pass

View File

@@ -25,12 +25,3 @@ def upgrade(migrate_engine):
    action.create(resource)
    resource.c.state.alter(name='status')
    resource.c.state_description.alter(name='status_reason')


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    resource = sqlalchemy.Table('resource', meta, autoload=True)
    resource.c.action.drop()
    resource.c.status.alter(name='state')
    resource.c.status_reason.alter(name='state_description')

View File

@@ -22,10 +22,3 @@ def upgrade(migrate_engine):
    action = sqlalchemy.Column('action',
                               sqlalchemy.String(length=255))
    action.create(stack)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    stack.c.action.drop()

View File

@@ -38,11 +38,3 @@ def upgrade(migrate_engine):
    )
    sqlalchemy.Table('resource', meta, autoload=True)
    resource_data.create()


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    resource_data = sqlalchemy.Table('resource_data', meta, autoload=True)
    resource_data.drop()

View File

@@ -19,34 +19,3 @@ def upgrade(migrate_engine):
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    sqlalchemy.Column('deleted_at', sqlalchemy.DateTime).create(stack)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine

    stack = sqlalchemy.Table('stack', meta, autoload=True)
    event = sqlalchemy.Table('event', meta, autoload=True)
    user_creds = sqlalchemy.Table('user_creds', meta, autoload=True)
    raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)

    # Remove soft deleted data
    not_deleted = None
    stmt = sqlalchemy.select(
        [stack.c.id,
         stack.c.raw_template_id,
         stack.c.user_creds_id]
    ).where(stack.c.deleted_at != not_deleted)
    deleted_stacks = migrate_engine.execute(stmt)
    for s in deleted_stacks:
        event_del = event.delete().where(event.c.stack_id == s[0])
        migrate_engine.execute(event_del)
        stack_del = stack.delete().where(stack.c.id == s[0])
        migrate_engine.execute(stack_del)
        raw_template_del = raw_template.delete(
        ).where(raw_template.c.id == s[1])
        migrate_engine.execute(raw_template_del)
        user_creds_del = user_creds.delete().where(user_creds.c.id == s[2])
        migrate_engine.execute(user_creds_del)

    stack.c.deleted_at.drop()

View File

@@ -13,7 +13,6 @@
import sqlalchemy
from sqlalchemy.dialects import mysql
from sqlalchemy import types as sqltypes


def upgrade(migrate_engine):
@@ -23,12 +22,3 @@ def upgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)
    raw_template.c.template.alter(type=mysql.LONGTEXT())


def downgrade(migrate_engine):
    if migrate_engine.name != 'mysql':
        return

    meta = sqlalchemy.MetaData(bind=migrate_engine)
    raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)
    raw_template.c.template.alter(type=sqltypes.TEXT())
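
Several scripts in this series, like the one above, branch on migrate_engine.name so that dialect-specific work (MySQL LONGTEXT columns, PostgreSQL sequences, DB2 nullability rules, SQLite table rebuilds) only runs where it applies. The attribute is simply SQLAlchemy's dialect name, as this tiny illustration shows:

import sqlalchemy

engine = sqlalchemy.create_engine('sqlite://')
# The migrations compare this against 'mysql', 'postgresql', 'sqlite',
# or 'ibm_db_sa' (DB2) before doing backend-specific work.
print(engine.name)  # -> 'sqlite'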

View File

@@ -19,10 +19,3 @@ def upgrade(migrate_engine):
    meta.bind = migrate_engine
    event = sqlalchemy.Table('event', meta, autoload=True)
    event.c.logical_resource_id.alter(name='resource_name')


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    event = sqlalchemy.Table('event', meta, autoload=True)
    event.c.resource_name.alter(name='logical_resource_id')

View File

@@ -21,16 +21,3 @@ def upgrade(migrate_engine):
    user_creds.c.service_user.drop()
    user_creds.c.service_password.drop()


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    user_creds = sqlalchemy.Table('user_creds', meta, autoload=True)
    service_user = sqlalchemy.Column('service_user',
                                     sqlalchemy.String(length=255))
    service_user.create(user_creds)
    service_password = sqlalchemy.Column('service_password',
                                         sqlalchemy.String(length=255))
    service_password.create(user_creds)

View File

@@ -21,16 +21,3 @@ def upgrade(migrate_engine):
    user_creds.c.aws_creds.drop()
    user_creds.c.aws_auth_url.drop()


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    user_creds = sqlalchemy.Table('user_creds', meta, autoload=True)
    aws_creds = sqlalchemy.Column('aws_creds',
                                  sqlalchemy.String(length=255))
    aws_creds.create(user_creds)
    aws_auth_url = sqlalchemy.Column('aws_auth_url',
                                     sqlalchemy.String(length=255))
    aws_auth_url.create(user_creds)

View File

@@ -27,11 +27,3 @@ def upgrade(migrate_engine):
    trust_id = sqlalchemy.Column('trust_id', sqlalchemy.String(length=255))
    trustor_user_id.create(user_creds)
    trust_id.create(user_creds)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    user_creds = sqlalchemy.Table('user_creds', meta, autoload=True)
    user_creds.c.trustor_user_id.drop()
    user_creds.c.trust_id.drop()

View File

@@ -13,7 +13,6 @@
import sqlalchemy
from sqlalchemy.dialects import mysql
from sqlalchemy import types as sqltypes


def upgrade(migrate_engine):
@@ -33,22 +32,3 @@ def upgrade(migrate_engine):
    watch_data = sqlalchemy.Table('watch_data', meta, autoload=True)
    watch_data.c.data.alter(type=mysql.LONGTEXT())


def downgrade(migrate_engine):
    if migrate_engine.name != 'mysql':
        return

    meta = sqlalchemy.MetaData(bind=migrate_engine)
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    stack.c.parameters.alter(type=sqltypes.TEXT())

    resource = sqlalchemy.Table('resource', meta, autoload=True)
    resource.c.rsrc_metadata.alter(type=sqltypes.TEXT())

    watch_rule = sqlalchemy.Table('watch_rule', meta, autoload=True)
    watch_rule.c.rule.alter(type=sqltypes.TEXT())

    watch_data = sqlalchemy.Table('watch_data', meta, autoload=True)
    watch_data.c.data.alter(type=sqltypes.TEXT())

View File

@@ -13,11 +13,8 @@
import uuid

from migrate.versioning import util as migrate_util
import sqlalchemy

from heat.common.i18n import _


def upgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
@@ -25,19 +22,3 @@ def upgrade(migrate_engine):
    event = sqlalchemy.Table('event', meta, autoload=True)
    event.c.id.alter(type=sqlalchemy.String(36), primary_key=True,
                     default=lambda: str(uuid.uuid4()))


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    event = sqlalchemy.Table('event', meta, autoload=True)

    try:
        event.c.id.alter(type=sqlalchemy.Integer, primary_key=True)
    except Exception:
        # NOTE(pafuent): since there is no way to downgrade just passing
        # The same is did in 018_resource_id_uuid.py
        migrate_util.log.warning(_('If you really want to downgrade to this '
                                   'version, you should drop all the records.'
                                   ))
        pass

View File

@@ -30,8 +30,3 @@ def upgrade(migrate_engine):
    resource = sqlalchemy.Table('resource', meta, autoload=True)
    resource.c.id.alter(type=sqlalchemy.String(36), primary_key=True,
                        default=lambda: str(uuid.uuid4()))


def downgrade(migrate_engine):
    # since uuid.uuid4() works so no need to do downgrade
    pass
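
Both of the uuid-related downgrades above essentially give up, because once integer ids have been rewritten as 36-character UUID strings there is nothing sensible to restore. For reference, the forward direction relies on a plain zero-argument callable as the column default; the snippet below is purely illustrative and not taken from the commit:

import uuid

import sqlalchemy

id_column = sqlalchemy.Column(
    'id', sqlalchemy.String(36),
    primary_key=True,
    # Call uuid.uuid4() and stringify the result for each new row; note that
    # str(uuid.uuid4) without the call would stringify the function object.
    default=lambda: str(uuid.uuid4()))

print(len(str(uuid.uuid4())))  # -> 36, which is why the column is String(36)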

View File

@@ -32,11 +32,3 @@ def upgrade(migrate_engine):
    )
    sqlalchemy.Table('stack', meta, autoload=True)
    stack_lock.create()


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    stack_lock = sqlalchemy.Table('stack_lock', meta, autoload=True)
    stack_lock.drop()

View File

@@ -24,11 +24,3 @@ def upgrade(migrate_engine):
                                   sqlalchemy.String(length=64),
                                   default='heat_decrypt')
        method.create(table)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    for table in ('user_creds', 'resource_data'):
        table = sqlalchemy.Table(table, meta, autoload=True)
        table.c.decrypt_method.drop()

View File

@@ -67,15 +67,3 @@ def upgrade(migrate_engine):
        mysql_charset='utf8'
    )
    software_deployment.create()


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    software_deployment = sqlalchemy.Table(
        'software_deployment', meta, autoload=True)
    software_deployment.drop()
    software_config = sqlalchemy.Table(
        'software_config', meta, autoload=True)
    software_config.drop()

View File

@@ -23,11 +23,3 @@ def upgrade(migrate_engine):
    raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)
    files = sqlalchemy.Column('files', types.Json, default={})
    files.create(raw_template)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)
    raw_template.c.files.drop()

View File

@@ -145,94 +145,3 @@ def upgrade_sqlite(migrate_engine):
    prev_event_table.drop()
    event_table.rename('event')


def downgrade(migrate_engine):
    if migrate_engine.name == 'sqlite':
        downgrade_sqlite(migrate_engine)
        return

    meta = sqlalchemy.MetaData(bind=migrate_engine)
    event_table = sqlalchemy.Table('event', meta, autoload=True)

    event_id_column_kwargs = {}
    if migrate_engine.name == 'ibm_db_sa':
        event_id_column_kwargs['nullable'] = False
    event_id = sqlalchemy.Column('tmp_id', sqlalchemy.String(length=36),
                                 default=lambda: str(uuid.uuid4),
                                 **event_id_column_kwargs)
    event_id.create(event_table)

    event_list = event_table.select().execute()
    for event in event_list:
        values = {'tmp_id': event.uuid}
        update = event_table.update().where(
            event_table.c.uuid == event.uuid).values(values)
        migrate_engine.execute(update)

    event_table.c.id.drop()
    event_table.c.uuid.drop()

    cons = constraint.PrimaryKeyConstraint('tmp_id', table=event_table)
    cons.create()

    alter_kwargs = {}
    # NOTE(mriedem): DB2 won't allow a primary key on a nullable column so
    # we have to make it non-nullable.
    if migrate_engine.name == 'ibm_db_sa':
        alter_kwargs['nullable'] = False
    event_table.c.tmp_id.alter('id', default=lambda: str(uuid.uuid4),
                               **alter_kwargs)

    if migrate_engine.name == 'postgresql':
        sequence = sqlalchemy.Sequence('evt')
        sqlalchemy.schema.DropSequence(sequence, bind=migrate_engine).execute()


def downgrade_sqlite(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)

    # (pafuent) Here it isn't recommended to import the table from the models,
    # because in future migrations the model could change and this migration
    # could fail.
    # I know it is ugly but it's the only way that I found to 'freeze'
    # the model state for this migration.
    stack_table = sqlalchemy.Table('stack', meta, autoload=True)
    event_table = sqlalchemy.Table(
        'new_event', meta,
        sqlalchemy.Column('id', sqlalchemy.String(36),
                          default=lambda: str(uuid.uuid4())),
        sqlalchemy.Column('stack_id', sqlalchemy.String(36),
                          sqlalchemy.ForeignKey(stack_table.c.id),
                          nullable=False),
        sqlalchemy.Column('resource_action', sqlalchemy.String(255)),
        sqlalchemy.Column('resource_status', sqlalchemy.String(255)),
        sqlalchemy.Column('resource_name', sqlalchemy.String(255)),
        sqlalchemy.Column('physical_resource_id', sqlalchemy.String(255)),
        sqlalchemy.Column('resource_status_reason', sqlalchemy.String(255)),
        sqlalchemy.Column('resource_type', sqlalchemy.String(255)),
        sqlalchemy.Column('resource_properties', sqlalchemy.PickleType),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime,
                          default=timeutils.utcnow),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime,
                          onupdate=timeutils.utcnow))
    event_table.create()

    prev_event_table = sqlalchemy.Table('event', meta, autoload=True)
    event_list = prev_event_table.select().execute().fetchall()
    for event in event_list:
        values = {
            'id': event.uuid,
            'stack_id': event.stack_id,
            'resource_action': event.resource_action,
            'resource_status': event.resource_status,
            'resource_name': event.resource_name,
            'physical_resource_id': event.physical_resource_id,
            'resource_status_reason': event.resource_status_reason,
            'resource_type': event.resource_type,
            'resource_properties': event.resource_properties}
        migrate_engine.execute(event_table.insert(values))

    prev_event_table.drop()
    event_table.rename('event')
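
The removed downgrade_sqlite() above is an instance of the table-rebuild pattern this repository uses whenever SQLite cannot ALTER a column in place: create a new_* table with the target schema, copy every row across, drop the old table, and rename the new one. A stripped-down, hypothetical sketch of the same pattern (table and column names are invented; rename() is one of the methods added by sqlalchemy-migrate's changeset extension, which the migration runner has already imported):

import sqlalchemy


def rebuild_event_with_string_id(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)

    # Target schema, created under a temporary name.
    new_table = sqlalchemy.Table(
        'new_event', meta,
        sqlalchemy.Column('id', sqlalchemy.String(36)),
        sqlalchemy.Column('message', sqlalchemy.String(255)))
    new_table.create()

    # Copy every row from the old table into the new one.
    old_table = sqlalchemy.Table('event', meta, autoload=True)
    for row in old_table.select().execute():
        migrate_engine.execute(
            new_table.insert().values(id=str(row.id), message=row.message))

    # Swap the tables.
    old_table.drop()
    new_table.rename('event')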

View File

@@ -22,10 +22,3 @@ def upgrade(migrate_engine):
    stack_user_project_id = sqlalchemy.Column('stack_user_project_id',
                                              sqlalchemy.String(length=64))
    stack_user_project_id.create(stack)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    stack.c.stack_user_project_id.drop()

View File

@@ -11,13 +11,11 @@
# License for the specific language governing permissions and limitations
# under the License.

from migrate.versioning import util as migrate_util
from oslo_serialization import jsonutils
import six
import sqlalchemy
from sqlalchemy.orm import sessionmaker

from heat.common.i18n import _
from heat.engine.hot import parameters
@@ -59,9 +57,3 @@ def upgrade(migrate_engine):
            if changed:
                _commit_schema(parameter, schema)
    session.close()


def downgrade(migrate_engine):
    migrate_util.log.warning(_('This version cannot be downgraded because '
                               'it involves a data migration to the '
                               'raw_template table.'))

View File

@@ -21,12 +21,3 @@ def upgrade(migrate_engine):
    software_config = sqlalchemy.Table('software_config', meta, autoload=True)
    software_config.c.config.alter(type=types.Json)
    software_config.c.io.drop()


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    software_config = sqlalchemy.Table('software_config', meta, autoload=True)
    software_config.c.config.alter(type=types.LongText)
    io = sqlalchemy.Column('io', types.Json)
    io.create(software_config)

View File

@@ -20,11 +20,3 @@ def upgrade(migrate_engine):
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    stack.c.user_creds_id.alter(nullable=True)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    stack.c.user_creds_id.alter(nullable=False)

View File

@@ -19,11 +19,3 @@ def upgrade(migrate_engine):
    software_deployment = sqlalchemy.Table(
        'software_deployment', meta, autoload=True)
    software_deployment.c.signal_id.drop()


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    software_deployment = sqlalchemy.Table(
        'software_deployment', meta, autoload=True)
    signal_id = sqlalchemy.Column('signal_id', sqlalchemy.String(1024))
    signal_id.create(software_deployment)

View File

@@ -10,14 +10,11 @@
# License for the specific language governing permissions and limitations
# under the License.

from migrate.versioning import util as migrate_util
from oslo_serialization import jsonutils
import six
import sqlalchemy
from sqlalchemy.orm import sessionmaker

from heat.common.i18n import _


def upgrade(migrate_engine):
    Session = sessionmaker(bind=migrate_engine)
@@ -71,9 +68,3 @@ def upgrade(migrate_engine):
            execute())
    session.commit()
    session.close()


def downgrade(migrate_engine):
    migrate_util.log.warning(_('This version cannot be downgraded because '
                               'it involves a data migration to the '
                               'raw_template table.'))

View File

@@ -22,10 +22,3 @@ def upgrade(migrate_engine):
    stack_user_project_id = sqlalchemy.Column('stack_user_project_id',
                                              sqlalchemy.String(length=64))
    stack_user_project_id.create(stack)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    stack = sqlalchemy.Table('software_deployment', meta, autoload=True)
    stack.c.stack_user_project_id.drop()

View File

@@ -12,13 +12,10 @@
import time

from migrate.versioning import util as migrate_util
from oslo_serialization import jsonutils
import sqlalchemy
from sqlalchemy.orm import sessionmaker

from heat.common.i18n import _


def upgrade(migrate_engine):
    Session = sessionmaker(bind=migrate_engine)
@@ -58,9 +55,3 @@ def upgrade(migrate_engine):
            execute())
    session.commit()
    session.close()


def downgrade(migrate_engine):
    migrate_util.log.warning(_('This version cannot be downgraded because '
                               'it involves a data migration to the '
                               'raw_template table.'))

View File

@@ -43,11 +43,3 @@ def upgrade(migrate_engine):
    )
    sqlalchemy.Table('stack', meta, autoload=True)
    snapshot.create()


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    snapshot = sqlalchemy.Table('snapshot', meta, autoload=True)
    snapshot.drop()

View File

@@ -13,8 +13,6 @@
import sqlalchemy

from heat.db.sqlalchemy import utils as migrate_utils


def upgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
@@ -35,33 +33,3 @@ def upgrade(migrate_engine):
        update = stack.update().where(
            stack.c.id == s.id).values(values)
        migrate_engine.execute(update)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    stack = sqlalchemy.Table('stack', meta, autoload=True)

    if migrate_engine.name == 'sqlite':
        _downgrade_045_sqlite(migrate_engine, meta, stack)
    else:
        stack.c.backup.drop()


def _downgrade_045_sqlite(migrate_engine, metadata, table):
    table_name = table.name

    constraints = [
        c.copy() for c in table.constraints
        if not isinstance(c, sqlalchemy.CheckConstraint)
    ]
    columns = [c.copy() for c in table.columns if c.name != "backup"]

    new_table = sqlalchemy.Table(table_name + "__tmp__", metadata,
                                 *(columns + constraints))
    new_table.create()

    migrate_utils.migrate_data(migrate_engine,
                               table,
                               new_table,
                               ['backup'])

View File

@@ -23,10 +23,3 @@ def upgrade(migrate_engine):
    resources = sqlalchemy.Table('resource', meta, autoload=True)
    properties_data = sqlalchemy.Column('properties_data', heat_db_types.Json)
    properties_data.create(resources)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    table = sqlalchemy.Table('resource', meta, autoload=True)
    table.c.properties_data.drop()

View File

@@ -43,10 +43,3 @@ def upgrade(migrate_engine):
    # Iterate over all top-level non nested stacks
    for st in get_stacks(owner_id=None):
        set_nested_depth(st, 0)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    stack.c.nested_depth.drop()

View File

@@ -22,13 +22,3 @@ def upgrade(migrate_engine):
    if migrate_engine.name == 'postgresql':
        resource = sqlalchemy.Table('resource', meta)
        resource.c.id.alter(server_default=None)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    if migrate_engine.name == 'postgresql':
        resource = sqlalchemy.Table('resource', meta, autoload=True)
        resource.c.id.alter(
            server_default=sqlalchemy.Sequence('resource_id_seq').next_value())

View File

@@ -22,10 +22,3 @@ def upgrade(migrate_engine):
    region_name = sqlalchemy.Column('region_name',
                                    sqlalchemy.String(length=255))
    region_name.create(user_creds)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    user_creds = sqlalchemy.Table('user_creds', meta, autoload=True)
    user_creds.c.region_name.drop()

View File

@@ -22,10 +22,3 @@ def upgrade(migrate_engine):
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    tags = sqlalchemy.Column('tags', heat_db_types.Json)
    tags.create(stack)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    stack.c.tags.drop()

View File

@@ -40,12 +40,3 @@ def upgrade(migrate_engine):
        mysql_charset='utf8'
    )
    service.create()


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    service = sqlalchemy.Table(
        'service', meta, autoload=True)
    service.drop()

View File

@@ -13,8 +13,6 @@
import sqlalchemy

from heat.db.sqlalchemy import utils as migrate_utils


def upgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
@@ -23,33 +21,3 @@ def upgrade(migrate_engine):
    convergence = sqlalchemy.Column('convergence', sqlalchemy.Boolean,
                                    default=False)
    convergence.create(stack)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    stack = sqlalchemy.Table('stack', meta, autoload=True)

    if migrate_engine.name == 'sqlite':
        _downgrade_052_sqlite(migrate_engine, meta, stack)
    else:
        stack.c.convergence.drop()


def _downgrade_052_sqlite(migrate_engine, metadata, table):
    table_name = table.name

    constraints = [
        c.copy() for c in table.constraints
        if not isinstance(c, sqlalchemy.CheckConstraint)
    ]
    columns = [c.copy() for c in table.columns if c.name != "convergence"]

    new_table = sqlalchemy.Table(table_name + "__tmp__", metadata,
                                 *(columns + constraints))
    new_table.create()

    migrate_utils.migrate_data(migrate_engine,
                               table,
                               new_table,
                               ['convergence'])

View File

@@ -25,16 +25,3 @@ def upgrade(migrate_engine):
    tenant_index = sqlalchemy.Index('ix_stack_tenant', stack.c.tenant,
                                    mysql_length=255)
    tenant_index.create(migrate_engine)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    name_index = sqlalchemy.Index('ix_stack_name', stack.c.name,
                                  mysql_length=255)
    name_index.drop(migrate_engine)
    tenant_index = sqlalchemy.Index('ix_stack_tenant', stack.c.tenant,
                                    mysql_length=255)
    tenant_index.drop(migrate_engine)
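
The index handling above uses plain SQLAlchemy Index objects, which can be created and dropped directly against an engine; mysql_length=255 only matters on MySQL, where an index over a long VARCHAR needs an explicit prefix length. A minimal sketch with invented table contents:

import sqlalchemy

engine = sqlalchemy.create_engine('sqlite://')
meta = sqlalchemy.MetaData(bind=engine)
stack = sqlalchemy.Table(
    'stack', meta,
    sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
    sqlalchemy.Column('tenant', sqlalchemy.String(64)))
stack.create()

tenant_index = sqlalchemy.Index('ix_stack_tenant', stack.c.tenant)
tenant_index.create(engine)  # CREATE INDEX ix_stack_tenant ON stack (tenant)
tenant_index.drop(engine)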

View File

@@ -13,8 +13,6 @@
import sqlalchemy

from heat.db.sqlalchemy import types as heat_db_types


def upgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
@@ -39,14 +37,3 @@ def upgrade(migrate_engine):
        mysql_charset='utf8'
    )
    stack_tag.create()


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)

    stack = sqlalchemy.Table('stack', meta, autoload=True)
    tags = sqlalchemy.Column('tags', heat_db_types.Json)
    tags.create(stack)

    stack_tag = sqlalchemy.Table('stack_tag', meta, autoload=True)
    stack_tag.drop()

View File

@@ -79,52 +79,6 @@ def _upgrade_sqlite(migrate_engine):
    _add_indexes(migrate_engine, new_stack)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    if migrate_engine.name == 'sqlite':
        _downgrade_sqlite(migrate_engine)
    else:
        raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)
        fkey = ForeignKeyConstraint(columns=[stack.c.prev_raw_template_id],
                                    refcolumns=[raw_template.c.id],
                                    name='prev_raw_template_ref')
        fkey.drop()
        stack.c.prev_raw_template_id.drop()
        stack.c.current_traversal.drop()
        stack.c.current_deps.drop()


def _downgrade_sqlite(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    table_name = stack.name

    # ignore CheckConstraints and FK Constraint on prev_raw_template_id.
    ignorecols = [
        stack.c.prev_raw_template_id.name,
        stack.c.current_traversal.name,
        stack.c.current_deps.name,
    ]
    ignorecons = [
        'prev_raw_template_ref',
    ]
    new_stack = migrate_utils.clone_table(table_name + '__tmp__', stack, meta,
                                          ignorecols=ignorecols,
                                          ignorecons=ignorecons)
    migrate_utils.migrate_data(migrate_engine,
                               stack,
                               new_stack,
                               ['prev_raw_template_id',
                                'current_traversal',
                                'current_deps'])

    # add the indexes back to new table
    _add_indexes(migrate_engine, new_stack)


def _add_indexes(migrate_engine, stack):
    name_index = sqlalchemy.Index('ix_stack_name',
                                  stack.c.name,

View File

@@ -102,79 +102,6 @@ def upgrade_sqlite(migrate_engine):
    new_template.rename('raw_template')


def downgrade(migrate_engine):
    if migrate_engine.name == 'sqlite':
        downgrade_sqlite(migrate_engine)
        return

    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine

    stack_table = sqlalchemy.Table('stack', meta, autoload=True)
    parameters = sqlalchemy.Column('parameters', heat_db_types.Json)
    parameters.create(stack_table)

    tmpl_table = sqlalchemy.Table('raw_template', meta, autoload=True)
    update_query = stack_table.update().values(
        parameters=sqlalchemy.select([tmpl_table.c.environment]).
        where(stack_table.c.raw_template_id == tmpl_table.c.id).
        as_scalar())
    migrate_engine.execute(update_query)
    tmpl_table.c.environment.drop()

    fkey = migrate.ForeignKeyConstraint(
        columns=[tmpl_table.c.predecessor],
        refcolumns=[tmpl_table.c.id],
        name='predecessor_fkey_ref')
    fkey.drop()
    tmpl_table.c.predecessor.drop()


def downgrade_sqlite(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine

    stack_table = sqlalchemy.Table('stack', meta, autoload=True)
    newcols = [sqlalchemy.Column('parameters', heat_db_types.Json)]
    new_stack = migrate_utils.clone_table('new_stack', stack_table,
                                          meta, newcols=newcols)

    tmpl_table = sqlalchemy.Table('raw_template', meta, autoload=True)
    ignorecols = [tmpl_table.c.environment.name, tmpl_table.c.predecessor.name]
    new_template = migrate_utils.clone_table('new_raw_template', tmpl_table,
                                             meta, ignorecols=ignorecols)

    # migrate stack data to new table
    stacks = list(stack_table.select().order_by(
        sqlalchemy.sql.expression.asc(stack_table.c.created_at))
        .execute())
    colnames = [c.name for c in stack_table.columns]
    for stack in stacks:
        values = dict(zip(colnames,
                          map(lambda colname: getattr(stack, colname),
                              colnames)))
        migrate_engine.execute(new_stack.insert(values))

    update_query = new_stack.update().values(
        parameters=sqlalchemy.select([tmpl_table.c.environment]).
        where(new_stack.c.raw_template_id == tmpl_table.c.id).
        as_scalar())
    migrate_engine.execute(update_query)

    # migrate template data to new table
    migrate_utils.migrate_data(migrate_engine,
                               tmpl_table,
                               new_template,
                               skip_columns=['environment', 'predecessor'])

    stack_table.drop()
    new_stack.rename('stack')

    # add the indexes back to new table
    _add_indexes(migrate_engine, new_stack)


def _add_indexes(migrate_engine, stack):
    name_index = sqlalchemy.Index('ix_stack_name',
                                  stack.c.name,

View File

@@ -265,210 +265,3 @@ def upgrade_sqlite_resource(migrate_engine):
    prev_res_table.drop()
    res_table.rename('resource')


def downgrade(migrate_engine):
    if migrate_engine.name == 'sqlite':
        downgrade_sqlite_resource_data_pre(migrate_engine)
        downgrade_sqlite_resource(migrate_engine)
    else:
        downgrade_resource_data_pre(migrate_engine)
        downgrade_resource(migrate_engine)
        downgrade_resource_data_post(migrate_engine)


def downgrade_resource_data_pre(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    rd_table = sqlalchemy.Table('resource_data', meta, autoload=True)
    res_table = sqlalchemy.Table('resource', meta, autoload=True)

    # remove foreignkey on resource_id
    inspector = sqlalchemy.inspect(migrate_engine)
    fkc_query = inspector.get_foreign_keys('resource_data')
    if fkc_query:
        fkc = ForeignKeyConstraint([rd_table.c.resource_id], [res_table.c.id],
                                   fkc_query[0]['name'])
        migrate_engine.execute(DropConstraint(fkc))

    # rename resource_id -> tmp_res_id
    rd_table.c.resource_id.alter(name='tmp_res_id')

    # create the new resource_id column (no foreignkey yet)
    res_id_column_kwargs = {}
    if migrate_engine.name == 'ibm_db_sa':
        # NOTE(mriedem): This is turned into a foreignkey key constraint
        # later so it must be non-nullable.
        res_id_column_kwargs['nullable'] = False
    res_id = sqlalchemy.Column('resource_id', sqlalchemy.String(36),
                               **res_id_column_kwargs)
    rd_table.create_column(res_id)

    # reload metadata due to some strange behaviour of sqlalchemy
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    rd_table = sqlalchemy.Table('resource_data', meta, autoload=True)

    res_list = res_table.select().order_by(
        sqlalchemy.sql.expression.asc(
            res_table.c.created_at)).execute().fetchall()
    for res in res_list:
        values = {'resource_id': res.uuid}
        update = rd_table.update().where(
            rd_table.c.tmp_res_id == res.id).values(values)
        migrate_engine.execute(update)


def downgrade_sqlite_resource_data_pre(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    sqlalchemy.Table('resource', meta, autoload=True)
    rd_table = sqlalchemy.Table(
        'new_resource_data', meta,
        sqlalchemy.Column('id',
                          sqlalchemy.Integer,
                          primary_key=True,
                          nullable=False),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('key', sqlalchemy.String(255)),
        sqlalchemy.Column('value', sqlalchemy.Text),
        sqlalchemy.Column('redact', sqlalchemy.Boolean),
        sqlalchemy.Column('decrypt_method', sqlalchemy.String(64)),
        sqlalchemy.Column('resource_id', sqlalchemy.Integer,
                          nullable=False),
        sqlalchemy.Column('tmp_res_id', sqlalchemy.Integer,
                          nullable=False))
    rd_table.create()

    prev_rd_table = sqlalchemy.Table('resource_data', meta, autoload=True)
    rd_list = list(prev_rd_table.select().order_by(
        sqlalchemy.sql.expression.asc(prev_rd_table.c.created_at))
        .execute())
    for rd in rd_list:
        values = {'key': rd.key,
                  'value': rd.value,
                  'redact': rd.redact,
                  'decrypt_method': rd.decrypt_method,
                  'resource_id': "foo",
                  'tmp_res_id': rd.resource_id}
        migrate_engine.execute(rd_table.insert(values))

    prev_rd_table.drop()
    rd_table.rename('resource_data')


def downgrade_resource(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    res_table = sqlalchemy.Table('resource', meta, autoload=True)

    res_id_column_kwargs = {}
    if migrate_engine.name == 'ibm_db_sa':
        res_id_column_kwargs['nullable'] = False
    res_id = sqlalchemy.Column('tmp_id', sqlalchemy.String(length=36),
                               default=lambda: str(uuid.uuid4),
                               **res_id_column_kwargs)
    res_id.create(res_table)

    res_list = res_table.select().execute()
    for res in res_list:
        values1 = {'tmp_id': res.uuid}
        update = res_table.update().where(
            res_table.c.uuid == res.uuid).values(values1)
        migrate_engine.execute(update)

    res_table.c.id.drop()
    res_table.c.uuid.drop()

    cons = constraint.PrimaryKeyConstraint('tmp_id', table=res_table)
    cons.create()

    alter_kwargs = {}
    # NOTE(mriedem): DB2 won't allow a primary key on a nullable column so
    # we have to make it non-nullable.
    if migrate_engine.name == 'ibm_db_sa':
        alter_kwargs['nullable'] = False
    res_table.c.tmp_id.alter('id', default=lambda: str(uuid.uuid4),
                             **alter_kwargs)

    if migrate_engine.name == 'postgresql':
        sequence = sqlalchemy.Sequence('res')
        sqlalchemy.schema.DropSequence(sequence, bind=migrate_engine).execute()


def downgrade_sqlite_resource(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)

    # (pafuent) Here it isn't recommended to import the table from the models,
    # because in future migrations the model could change and this migration
    # could fail.
    # I know it is ugly but it's the only way that I found to 'freeze'
    # the model state for this migration.
    stack_table = sqlalchemy.Table('stack', meta, autoload=True)
    res_table = sqlalchemy.Table(
        'new_resource', meta,
        sqlalchemy.Column('id', sqlalchemy.String(36),
                          default=lambda: str(uuid.uuid4())),
        sqlalchemy.Column('stack_id', sqlalchemy.String(36),
                          sqlalchemy.ForeignKey(stack_table.c.id),
                          nullable=False),
        sqlalchemy.Column('name', sqlalchemy.String(255)),
        sqlalchemy.Column('nova_instance', sqlalchemy.String(255)),
        sqlalchemy.Column('action', sqlalchemy.String(255)),
        sqlalchemy.Column('status', sqlalchemy.String(255)),
        sqlalchemy.Column('status_reason', sqlalchemy.String(255)),
        sqlalchemy.Column('rsrc_metadata', sqlalchemy.Text),
        sqlalchemy.Column('properties_data', sqlalchemy.Text),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime,
                          default=timeutils.utcnow),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime,
                          onupdate=timeutils.utcnow))
    res_table.create()

    prev_res_table = sqlalchemy.Table('resource', meta, autoload=True)
    res_list = prev_res_table.select().execute().fetchall()
    for res in res_list:
        values = {
            'id': res.uuid,
            'stack_id': res.stack_id,
            'name': res.name,
            'nova_instance': res.nova_instance,
            'status': res.status,
            'status_reason': res.status_reason,
            'rsrc_metadata': res.rsrc_metadata,
            'properties_data': res.properties_data,
            'created_at': res.created_at,
            'updated_at': res.updated_at}
        migrate_engine.execute(res_table.insert(values))

    prev_res_table.drop()
    res_table.rename('resource')


def downgrade_resource_data_post(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    res_table = sqlalchemy.Table('resource', meta, autoload=True)
    rd_table = sqlalchemy.Table('resource_data', meta, autoload=True)

    # set: resource_data.resource_id = resource.id
    if migrate_engine.name == 'sqlite':
        res_list = res_table.select().order_by(
            sqlalchemy.sql.expression.asc(
                res_table.c.created_at)).execute().fetchall()
        for res in res_list:
            values = {'resource_id': res.id}
            update = rd_table.update().where(
                rd_table.c.tmp_res_id == res.id).values(values)
            migrate_engine.execute(update)

    # set foreignkey on resource_id
    if migrate_engine.name == 'mysql':
        sqlalchemy.Index('resource_data_resource_id_fkey',
                         rd_table.c.resource_id).drop()
    cons = migrate.ForeignKeyConstraint(columns=[rd_table.c.resource_id],
                                        refcolumns=[res_table.c.id])
    cons.create()

    rd_table.c.resource_id.alter(nullable=False)
    rd_table.c.tmp_res_id.drop()

View File

@@ -21,10 +21,3 @@ def upgrade(migrate_engine):
    atomic_key = sqlalchemy.Column('atomic_key', sqlalchemy.Integer)
    engine_id.create(resource)
    atomic_key.create(resource)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    resource = sqlalchemy.Table('resource', meta, autoload=True)
    resource.c.engine_id.drop()
    resource.c.atomic_key.drop()

View File

@@ -45,12 +45,3 @@ def upgrade(migrate_engine):
        mysql_charset='utf8'
    )
    sync_point.create()


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    sync_point = sqlalchemy.Table(
        'sync_point', meta, autoload=True)
    sync_point.drop()

View File

@@ -15,7 +15,6 @@ from migrate.changeset import constraint
import sqlalchemy

from heat.db.sqlalchemy import types
from heat.db.sqlalchemy import utils as migrate_utils


def upgrade(migrate_engine):
@@ -41,49 +40,3 @@ def upgrade(migrate_engine):
        refcolumns=[raw_template.c.id],
        name='current_template_fkey_ref')
    fkey.create()


def downgrade(migrate_engine):
    if migrate_engine.name == 'sqlite':
        _downgrade_sqlite(migrate_engine)
        return

    meta = sqlalchemy.MetaData(bind=migrate_engine)
    resource = sqlalchemy.Table('resource', meta, autoload=True)
    raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)

    fkey = constraint.ForeignKeyConstraint(
        columns=[resource.c.current_template_id],
        refcolumns=[raw_template.c.id],
        name='current_template_fkey_ref')
    fkey.drop()

    resource.c.current_template_id.drop()
    resource.c.needed_by.drop()
    resource.c.requires.drop()
    resource.c.replaces.drop()
    resource.c.replaced_by.drop()


def _downgrade_sqlite(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine

    resource_table = sqlalchemy.Table('resource', meta, autoload=True)
    ignorecons = ['current_template_fkey_ref']
    ignorecols = [resource_table.c.current_template_id.name,
                  resource_table.c.needed_by.name,
                  resource_table.c.requires.name,
                  resource_table.c.replaces.name,
                  resource_table.c.replaced_by.name]
    new_resource = migrate_utils.clone_table('new_resource',
                                             resource_table,
                                             meta, ignorecols=ignorecols,
                                             ignorecons=ignorecons)

    # migrate resources to new table
    migrate_utils.migrate_data(migrate_engine,
                               resource_table,
                               new_resource,
                               skip_columns=ignorecols)

View File

@@ -34,25 +34,3 @@ def upgrade(migrate_engine):
            table.c.new_status_reason.alter(name='status_reason')
        else:
            table.c.status_reason.alter(type=sqlalchemy.Text)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    for tab_name in ['stack', 'resource', 'software_deployment']:
        table = sqlalchemy.Table(tab_name, meta, autoload=True)
        if migrate_engine.name == 'ibm_db_sa':
            status_reason = sqlalchemy.Column('new_status_reason',
                                              sqlalchemy.String(255))
            table.create_column(status_reason)
            qry = table.select().execute().fetchall()
            for item in qry:
                values = {'new_status_reason': item.status_reason}
                update = table.update().where(
                    table.c.id == item.id).values(values)
                migrate_engine.execute(update)
            table.c.status_reason.drop()
            table.c.new_status_reason.alter(name='status_reason')
        else:
            table.c.status_reason.alter(type=sqlalchemy.String(255))

View File

@@ -13,8 +13,6 @@
import sqlalchemy

from heat.db.sqlalchemy import utils as migrate_utils


def upgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
@@ -23,23 +21,3 @@ def upgrade(migrate_engine):
    parent_resource_name = sqlalchemy.Column('parent_resource_name',
                                             sqlalchemy.String(255))
    parent_resource_name.create(stack)


def downgrade(migrate_engine):
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    if migrate_engine.name == 'sqlite':
        _downgrade_062_sqlite(migrate_engine, meta, stack)
    else:
        stack.c.parent_resource_name.drop()


def _downgrade_062_sqlite(migrate_engine, metadata, table):
    new_table = migrate_utils.clone_table(
        table.name + '__tmp__', table, metadata,
        ignorecols=['parent_resource_name'])
    migrate_utils.migrate_data(migrate_engine,
                               table,
                               new_table,
                               ['parent_resource_name'])
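
With every downgrade() gone, the migrate repository can only be walked forward; operators who need to back out an upgrade are expected to restore a database backup instead. A rough sketch of driving the repository with the stock sqlalchemy-migrate versioning API (the database URL and repository path are placeholders; Heat itself wraps this behind its heat-manage db_sync command):

from migrate.versioning import api as versioning_api

DB_URL = 'mysql://heat:secret@localhost/heat'  # placeholder
REPO = 'heat/db/sqlalchemy/migrate_repo'       # placeholder path

print(versioning_api.db_version(DB_URL, REPO))  # current schema version
versioning_api.upgrade(DB_URL, REPO)            # move to the latest version
# versioning_api.downgrade(DB_URL, REPO, some_older_version) would now fail
# for these revisions, since the scripts no longer define downgrade().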