Merge "Implement online schema migrations"

This commit is contained in:
Jenkins 2015-06-17 17:28:27 +00:00 committed by Gerrit Code Review
commit ef14a15a8e
7 changed files with 1320 additions and 3 deletions

View File

@@ -908,6 +908,24 @@ class DbCommands(object):
"""Sync the database up to the most recent version."""
return migration.db_sync(version)
@args('--dry-run', action='store_true', dest='dry_run',
        default=False, help='Print SQL statements instead of executing')
def expand(self, dry_run):
    """Expand database schema.

    :param dry_run: when True, print the SQL instead of executing it
    """
    # Delegates to the migration layer; presumably the additive
    # (expand) phase of an online schema migration — see nova.db.migration.
    result = migration.db_expand(dry_run)
    return result
@args('--dry-run', action='store_true', dest='dry_run',
        default=False, help='Print SQL statements instead of executing')
def migrate(self, dry_run):
    """Migrate database schema.

    :param dry_run: when True, print the SQL instead of executing it
    """
    # Thin CLI wrapper over the data-migration phase.
    outcome = migration.db_migrate(dry_run)
    return outcome
@args('--dry-run', action='store_true', dest='dry_run',
        default=False, help='Print SQL statements instead of executing')
def contract(self, dry_run):
    """Contract database schema.

    :param dry_run: when True, print the SQL instead of executing it
    """
    # Thin CLI wrapper over the destructive (contract) phase.
    outcome = migration.db_contract(dry_run)
    return outcome
def version(self):
    """Print the current database version to stdout."""
    current = migration.db_version()
    print(current)

View File

@@ -26,6 +26,21 @@ def db_sync(version=None, database='main'):
return IMPL.db_sync(version=version, database=database)
def db_expand(dryrun=False, database='main'):
    """Run the expand phase of an online schema migration.

    :param dryrun: if True, only print the SQL instead of executing it
    :param database: name of the database to operate on ('main' default)
    """
    kwargs = {'dryrun': dryrun, 'database': database}
    return IMPL.db_expand(**kwargs)
def db_migrate(dryrun=False, database='main'):
    """Run the data-migration phase of an online schema migration.

    :param dryrun: if True, only print the SQL instead of executing it
    :param database: name of the database to operate on ('main' default)
    """
    # Forward everything to the backend implementation module.
    kwargs = {'dryrun': dryrun, 'database': database}
    return IMPL.db_migrate(**kwargs)
def db_contract(dryrun=False, database='main'):
    """Run the contract phase of an online schema migration.

    :param dryrun: if True, only print the SQL instead of executing it
    :param database: name of the database to operate on ('main' default)
    """
    kwargs = {'dryrun': dryrun, 'database': database}
    return IMPL.db_contract(**kwargs)
def db_version(database='main'):
    """Return the current schema version of the given database.

    :param database: name of the database to inspect ('main' default)
    """
    backend = IMPL
    return backend.db_version(database=database)

File diff suppressed because it is too large Load Diff

View File

@@ -716,9 +716,16 @@ class Migration(BASE, NovaBase):
__table_args__ = (
Index('migrations_instance_uuid_and_status_idx', 'deleted',
'instance_uuid', 'status'),
# MySQL has a limit of 3072 bytes for an multi-column index. This
# index ends up being larger than that using the utf-8 encoding.
# Limiting the index to the prefixes will keep it under the limit.
# FIXME(johannes): Is it MySQL or InnoDB that imposes the limit?
Index('migrations_by_host_nodes_and_status_idx', 'deleted',
'source_compute', 'dest_compute', 'source_node', 'dest_node',
'status'),
'status', mysql_length={'source_compute': 100,
'dest_compute': 100,
'source_node': 100,
'dest_node': 100}),
)
id = Column(Integer, primary_key=True, nullable=False)
# NOTE(tr3buchet): the ____compute variables are instance['host']

View File

@@ -1878,3 +1878,7 @@ class InvalidImageFormat(Invalid):
class UnsupportedImageModel(Invalid):
    """Raised when a requested image model is not supported."""

    msg_fmt = _("Image model '%(image)s' is not supported")
class DatabaseMigrationError(NovaException):
    """Raised when a database schema migration fails."""

    msg_fmt = _("Database migration failed: %(reason)s")

View File

@@ -39,6 +39,7 @@ import glob
import logging
import os
import alembic
from migrate import UniqueConstraint
from migrate.versioning import repository
import mock
@@ -778,3 +779,290 @@ class ProjectTestCase(test.NoDBTestCase):
"which is not supported:"
"\n\t%s" % '\n\t'.join(sorted(includes_downgrade)))
self.assertFalse(includes_downgrade, helpful_msg)
class SchemaChangeSchedulerTest(test.NoDBTestCase):
    """Unit tests for the schema-change scheduler's ordering rules."""

    def test_add_fk_after_add_column(self):
        """A foreign key is scheduled after the column it references."""
        existing = sqlalchemy.MetaData()
        sqlalchemy.Table('a', existing,
                         sqlalchemy.Column('id', sqlalchemy.Integer))
        sqlalchemy.Table('b', existing,
                         sqlalchemy.Column('id', sqlalchemy.Integer))

        desired = sqlalchemy.MetaData()
        sqlalchemy.Table('a', desired,
                         sqlalchemy.Column('id', sqlalchemy.Integer))
        fk_column = sqlalchemy.Column('a_id', sqlalchemy.Integer,
                                      sqlalchemy.ForeignKey('a.id'))
        b_table = sqlalchemy.Table('b', desired,
                                   sqlalchemy.Column('id',
                                                     sqlalchemy.Integer),
                                   fk_column)
        fk_constraint = sqlalchemy.ForeignKeyConstraint(['a_id'], ['a.id'],
                                                        table=b_table)

        add_column_op = sa_migration.AddColumn('b', fk_column,
                                               desired_phase='migrate')
        add_fk_op = sa_migration.AddForeignKey(fk_constraint)

        scheduler = sa_migration.Scheduler()
        # Deliberately add the FK first; the scheduler must reorder it
        # to run after the column it depends on.
        scheduler.add(add_fk_op)
        scheduler.add(add_column_op)
        expand, migrate, contract = scheduler.schedule()

        self.assertEqual([], expand)
        self.assertEqual([add_column_op, add_fk_op], migrate)
        self.assertEqual([], contract)
def test_remove_index_after_add(self):
    """Replacing an index drops the old definition before adding the new."""
    existing = sqlalchemy.MetaData()
    old_table = sqlalchemy.Table('a', existing,
                                 sqlalchemy.Column('id', sqlalchemy.Integer),
                                 sqlalchemy.Column('foo', sqlalchemy.Integer))

    desired = sqlalchemy.MetaData()
    new_table = sqlalchemy.Table('a', desired,
                                 sqlalchemy.Column('id', sqlalchemy.Integer))

    # Same index name, different column list -> must be drop-then-add.
    wide_index = sqlalchemy.Index('a_id_idx', old_table.c.id,
                                  old_table.c.foo)
    narrow_index = sqlalchemy.Index('a_id_idx', new_table.c.id)
    drop_op = sa_migration.DropIndex(wide_index)
    add_op = sa_migration.AddIndex(narrow_index, {})

    scheduler = sa_migration.Scheduler()
    scheduler.add(add_op)
    scheduler.add(drop_op)
    expand, migrate, contract = scheduler.schedule()

    self.assertEqual([], expand)
    self.assertEqual([drop_op, add_op], migrate)
    self.assertEqual([], contract)
def _table(*args, **kwargs):
    """Build a Table, forcing the InnoDB engine for MySQL backends.

    NOTE(review): any caller-supplied ``mysql_engine`` is overridden —
    looks intentional for these tests; confirm.
    """
    return sqlalchemy.Table(*args, **dict(kwargs, mysql_engine='InnoDB'))
class SchemaChangeDDLCheckers(object):
    """Mixin exercising each DDL operation against a live engine.

    Concrete subclasses mix in an opportunistic DB test base that
    provides ``self.engine``.
    """

    def setUp(self):
        super(SchemaChangeDDLCheckers, self).setUp()
        # Alembic needs a MigrationContext bound to the engine before it
        # can emit DDL through an Operations facade.
        migration_ctx = alembic.migration.MigrationContext.configure(
            self.engine)
        self.ddlop = alembic.operations.Operations(migration_ctx)
def test_add_table(self):
    """A created table's columns are visible on reflection."""
    # NOTE(review): unlike its siblings this test never executes an
    # sa_migration operation; it only checks plain table creation —
    # confirm whether an AddTable op was meant to be exercised here.
    meta = sqlalchemy.MetaData()
    _table('a', meta, sqlalchemy.Column('id', sqlalchemy.Integer))
    meta.create_all(self.engine)
    reflected = oslodbutils.get_table(self.engine, 'a')
    self.assertIn('id', reflected.c)
def test_drop_table(self):
    """DropTable removes the table from the database."""
    meta = sqlalchemy.MetaData()
    tbl = _table('a', meta, sqlalchemy.Column('id', sqlalchemy.Integer))
    meta.create_all(self.engine)
    # get_table raises if the table was not actually created.
    oslodbutils.get_table(self.engine, 'a')
    sa_migration.DropTable(tbl).execute(self.ddlop)
    self.assertRaises(sqlalchemy.exc.NoSuchTableError,
                      oslodbutils.get_table, self.engine, 'a')
def test_add_column(self):
    """AddColumn appends a new column to an existing table."""
    meta = sqlalchemy.MetaData()
    _table('a', meta, sqlalchemy.Column('id', sqlalchemy.Integer))
    meta.create_all(self.engine)
    uuid_col = sqlalchemy.Column('uuid', sqlalchemy.String(36))
    sa_migration.AddColumn('a', uuid_col).execute(self.ddlop)
    reflected = oslodbutils.get_table(self.engine, 'a')
    self.assertIn('id', reflected.c)
    self.assertIn('uuid', reflected.c)
def test_alter_column_nullable(self):
    """AlterColumn can flip a column from nullable to NOT NULL."""
    meta = sqlalchemy.MetaData()
    uuid_col = sqlalchemy.Column('uuid', sqlalchemy.String(36))
    tbl = _table('a', meta,
                 sqlalchemy.Column('id', sqlalchemy.Integer),
                 uuid_col)
    meta.create_all(self.engine)
    self.assertTrue(tbl.c.uuid.nullable)
    alter = sa_migration.AlterColumn('a', 'uuid',
                                     {'nullable': False,
                                      'existing_type': uuid_col.type})
    alter.execute(self.ddlop)
    reflected = oslodbutils.get_table(self.engine, 'a')
    self.assertFalse(reflected.c.uuid.nullable)
def test_alter_column_type(self):
    """AlterColumn can change a column's type (Text -> String)."""
    meta = sqlalchemy.MetaData()
    uuid_col = sqlalchemy.Column('uuid', sqlalchemy.Text)
    tbl = _table('a', meta,
                 sqlalchemy.Column('id', sqlalchemy.Integer),
                 uuid_col)
    meta.create_all(self.engine)
    self.assertIsInstance(tbl.c.uuid.type, sqlalchemy.Text)
    alter = sa_migration.AlterColumn('a', 'uuid',
                                     {'nullable': True,
                                      'type_': sqlalchemy.String(36)})
    alter.execute(self.ddlop)
    reflected = oslodbutils.get_table(self.engine, 'a')
    self.assertIsInstance(reflected.c.uuid.type, sqlalchemy.String)
    # Text subclasses String, so the positive assert alone could pass
    # even if the type never changed; also assert the old type is gone.
    self.assertNotIsInstance(reflected.c.uuid.type, sqlalchemy.Text)
def test_drop_column(self):
    """DropColumn removes the column while leaving others intact."""
    meta = sqlalchemy.MetaData()
    uuid_col = sqlalchemy.Column('uuid', sqlalchemy.String(36))
    _table('a', meta,
           sqlalchemy.Column('id', sqlalchemy.Integer),
           uuid_col)
    meta.create_all(self.engine)
    sa_migration.DropColumn('a', uuid_col).execute(self.ddlop)
    reflected = oslodbutils.get_table(self.engine, 'a')
    self.assertIn('id', reflected.c)
    self.assertNotIn('uuid', reflected.c)
def test_add_index(self):
    """AddIndex creates a new index on an existing table."""
    meta = sqlalchemy.MetaData()
    tbl = _table('a', meta, sqlalchemy.Column('id', sqlalchemy.Integer))
    meta.create_all(self.engine)
    idx = sqlalchemy.Index('a_id_idx', tbl.c.id)
    sa_migration.AddIndex(idx, {}).execute(self.ddlop)
    reflected = oslodbutils.get_table(self.engine, 'a')
    self.assertIn('a_id_idx', [i.name for i in reflected.indexes])
def test_drop_index(self):
    """DropIndex removes an existing index."""
    meta = sqlalchemy.MetaData()
    idx = sqlalchemy.Index('a_id_idx', 'id')
    _table('a', meta,
           sqlalchemy.Column('id', sqlalchemy.Integer),
           idx)
    meta.create_all(self.engine)
    sa_migration.DropIndex(idx).execute(self.ddlop)
    reflected = oslodbutils.get_table(self.engine, 'a')
    self.assertNotIn('a_id_idx', [i.name for i in reflected.indexes])
def test_add_unique_constraint(self):
    """AddUniqueConstraint adds a uniqueness guarantee on a column."""
    meta = sqlalchemy.MetaData()
    tbl = _table('a', meta, sqlalchemy.Column('id', sqlalchemy.Integer))
    meta.create_all(self.engine)
    uc = sqlalchemy.UniqueConstraint(tbl.c.id, name='uniq_a_id')
    sa_migration.AddUniqueConstraint(uc).execute(self.ddlop)
    reflected = oslodbutils.get_table(self.engine, 'a')
    # MySQL surfaces unique constraints as unique indexes (unlike
    # PostgreSQL), and older SQLAlchemy reflects them inconsistently,
    # so gather both representations before asserting.
    names = {i.name for i in reflected.indexes if i.unique}
    names.update(c.name for c in reflected.constraints
                 if isinstance(c, sqlalchemy.UniqueConstraint))
    self.assertIn('uniq_a_id', names)
def test_drop_unique_constraint(self):
    """DropUniqueConstraint removes the uniqueness guarantee."""
    meta = sqlalchemy.MetaData()
    uc = sqlalchemy.UniqueConstraint('id', name='uniq_a_id')
    _table('a', meta,
           sqlalchemy.Column('id', sqlalchemy.Integer),
           uc)
    meta.create_all(self.engine)
    sa_migration.DropUniqueConstraint(uc).execute(self.ddlop)
    reflected = oslodbutils.get_table(self.engine, 'a')
    # See the comment in test_add_unique_constraint: collect both
    # unique indexes and unique constraints before asserting.
    names = {i.name for i in reflected.indexes if i.unique}
    names.update(c.name for c in reflected.constraints
                 if isinstance(c, sqlalchemy.UniqueConstraint))
    self.assertNotIn('uniq_a_id', names)
def test_add_foreign_key(self):
    """AddForeignKey creates a named FK between two tables."""
    meta = sqlalchemy.MetaData()
    parent = _table('a', meta,
                    sqlalchemy.Column('id', sqlalchemy.Integer),
                    sqlalchemy.UniqueConstraint('id'))
    child = _table('b', meta,
                   sqlalchemy.Column('a_id', sqlalchemy.Integer))
    meta.create_all(self.engine)
    fkc = sqlalchemy.ForeignKeyConstraint([child.c.a_id], [parent.c.id],
                                          name='b_a_id_fk')
    sa_migration.AddForeignKey(fkc).execute(self.ddlop)
    reflected = oslodbutils.get_table(self.engine, 'b')
    by_name = {c.name: c for c in reflected.constraints
               if isinstance(c, sqlalchemy.ForeignKeyConstraint)}
    self.assertIn('b_a_id_fk', by_name)
    # Verify the FK links the expected column pair.
    pairs = [(fk.parent.name, fk.column.name)
             for fk in by_name['b_a_id_fk'].elements]
    self.assertEqual([('a_id', 'id')], pairs)
def test_drop_foreign_key(self):
    """DropForeignKey removes an existing FK constraint."""
    meta = sqlalchemy.MetaData()
    parent = _table('a', meta,
                    sqlalchemy.Column('id', sqlalchemy.Integer),
                    sqlalchemy.UniqueConstraint('id'))
    child = _table('b', meta,
                   sqlalchemy.Column('a_id', sqlalchemy.Integer))
    fkc = sqlalchemy.ForeignKeyConstraint([child.c.a_id], [parent.c.id],
                                          name='b_a_id_fk')
    meta.create_all(self.engine)
    sa_migration.DropForeignKey(fkc).execute(self.ddlop)
    reflected = oslodbutils.get_table(self.engine, 'b')
    self.assertNotIn('b_a_id_fk',
                     {c.name: c for c in reflected.constraints})
class TestSchemaChangeDDLMySQL(SchemaChangeDDLCheckers,
                               test_base.MySQLOpportunisticTestCase,
                               test.NoDBTestCase):
    """Run the DDL checker mixin against an opportunistic MySQL backend."""
    pass
class TestSchemaChangeDDLPostgreSQL(SchemaChangeDDLCheckers,
                                    test_base.PostgreSQLOpportunisticTestCase,
                                    test.NoDBTestCase):
    """Run the DDL checker mixin against an opportunistic PostgreSQL backend."""
    pass

View File

@@ -98,13 +98,14 @@ class TestNullInstanceUuidScanDB(test.TestCase):
@mock.patch.object(migration, 'db_version', return_value=2)
@mock.patch.object(migration, '_find_migrate_repo', return_value='repo')
@mock.patch.object(migration, '_db_sync_locked', return_value=False)
@mock.patch.object(versioning_api, 'upgrade')
@mock.patch.object(versioning_api, 'downgrade')
@mock.patch.object(migration, 'get_engine', return_value='engine')
class TestDbSync(test.NoDBTestCase):
def test_version_none(self, mock_get_engine, mock_downgrade, mock_upgrade,
mock_find_repo, mock_version):
mock_sync_locked, mock_find_repo, mock_version):
database = 'fake'
migration.db_sync(database=database)
mock_version.assert_called_once_with(database)
@@ -114,7 +115,7 @@ class TestDbSync(test.NoDBTestCase):
self.assertFalse(mock_downgrade.called)
def test_downgrade(self, mock_get_engine, mock_downgrade, mock_upgrade,
mock_find_repo, mock_version):
mock_sync_locked, mock_find_repo, mock_version):
database = 'fake'
migration.db_sync(1, database=database)
mock_version.assert_called_once_with(database)