Add tests for Alembic migrations

This is the next step towards CI/CD:

This code enables testing of DB migrations against real databases -
MySQL and Postgres. Unit testing on in-memory sqlite doesn't work with this
patch (there seems to be a problem with the Alembic/in-memory-sqlite combination).

If MySQL and PostgreSQL instances with the specific user/password and
database are present, opportunistic tests will be started automatically. See
storyboard/tests/db/sqlalchemy/test_migrations.py for instructions.

OpenStack CI infrastructure provides these, so we get tests against
MySQL and Postgres with this commit.

Note(1): this patch doesn't modify the database model. The main goal of this
patch is to enable continuous testing of DB migrations.

Note(2): most of the code is based on Nova, which is good because
eventually it'll be extracted into an Oslo library and we'll be able
to use Oslo instead of custom code.

Change-Id: Ia763124bea9ac8e39c6604f04fd99fa83645d5ba
Ruslan Kamaldinov 2014-01-14 00:20:19 +04:00 committed by Michael Krotscheck
parent 2300b93e66
commit 1ce4b9a9f5
12 changed files with 1053 additions and 31 deletions

View File

@ -3,6 +3,7 @@
# The list of modules to copy from oslo-incubator.git
module=db
module=db.sqlalchemy
module=processutils
# The base module to hold the copy of openstack.common
base=storyboard

View File

@ -6,7 +6,7 @@ markdown
python-openid
six>=1.4.1
Babel>=0.9.6
SQLAlchemy>=0.8
SQLAlchemy>=0.8,<=0.8.99
alembic>=0.4.1
oslo.config>=1.2.0
iso8601>=0.1.8

View File

@ -27,8 +27,32 @@ down_revision = None
from alembic import op
import sqlalchemy as sa
MYSQL_ENGINE = 'InnoDB'
MYSQL_CHARSET = 'utf8'
def _define_enums():
branch_status = sa.Enum(
'master', 'release', 'stable', 'unsupported',
name='branch_status')
storyboard_priority = sa.Enum(
'Undefined', 'Low', 'Medium', 'High', 'Critical',
name='priority')
task_status = sa.Enum(
'Todo', 'In review', 'Landed',
name='task_status')
return {
'branch_status': branch_status,
'storyboard_priority': storyboard_priority,
'task_status': task_status
}
def upgrade(active_plugins=None, options=None):
enums = _define_enums()
op.create_table(
'branches',
@ -36,14 +60,12 @@ def upgrade(active_plugins=None, options=None):
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(length=50), nullable=True),
sa.Column(
'status',
sa.Enum(
'master', 'release', 'stable', 'unsupported',
name='branch_status'), nullable=True),
sa.Column('status', enums['branch_status'], nullable=True),
sa.Column('release_date', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', name='uniq_branch_name')
sa.UniqueConstraint('name', name='uniq_branch_name'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET
)
op.create_table(
'project_groups',
@ -53,7 +75,9 @@ def upgrade(active_plugins=None, options=None):
sa.Column('name', sa.String(length=50), nullable=True),
sa.Column('title', sa.Unicode(length=100), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', name='uniq_group_name')
sa.UniqueConstraint('name', name='uniq_group_name'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET
)
op.create_table(
'users',
@ -71,7 +95,9 @@ def upgrade(active_plugins=None, options=None):
sa.Column('last_login', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email', name='uniq_user_email'),
sa.UniqueConstraint('username', name='uniq_user_username')
sa.UniqueConstraint('username', name='uniq_user_username'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET
)
op.create_table(
'teams',
@ -80,7 +106,9 @@ def upgrade(active_plugins=None, options=None):
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.Unicode(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', name='uniq_team_name')
sa.UniqueConstraint('name', name='uniq_team_name'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET
)
op.create_table(
'permissions',
@ -90,7 +118,9 @@ def upgrade(active_plugins=None, options=None):
sa.Column('name', sa.Unicode(length=50), nullable=True),
sa.Column('codename', sa.Unicode(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', name='uniq_team_name')
sa.UniqueConstraint('name', name='uniq_permission_name'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET
)
op.create_table(
'team_membership',
@ -98,7 +128,9 @@ def upgrade(active_plugins=None, options=None):
sa.Column('team_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['team_id'], ['teams.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint()
sa.PrimaryKeyConstraint(),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET
)
op.create_table(
'user_permissions',
@ -106,7 +138,9 @@ def upgrade(active_plugins=None, options=None):
sa.Column('permission_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['permission_id'], ['permissions.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint()
sa.PrimaryKeyConstraint(),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET
)
op.create_table(
'team_permissions',
@ -114,7 +148,9 @@ def upgrade(active_plugins=None, options=None):
sa.Column('team_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['team_id'], ['teams.id'], ),
sa.ForeignKeyConstraint(['permission_id'], ['permissions.id'], ),
sa.PrimaryKeyConstraint()
sa.PrimaryKeyConstraint(),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET
)
op.create_table(
'stories',
@ -125,13 +161,11 @@ def upgrade(active_plugins=None, options=None):
sa.Column('title', sa.Unicode(length=100), nullable=True),
sa.Column('description', sa.UnicodeText(), nullable=True),
sa.Column('is_bug', sa.Boolean(), nullable=True),
sa.Column(
'priority',
sa.Enum(
'Undefined', 'Low', 'Medium', 'High', 'Critical',
name='priority'), nullable=True),
sa.Column('priority', enums['storyboard_priority'], nullable=True),
sa.ForeignKeyConstraint(['creator_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
sa.PrimaryKeyConstraint('id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET
)
op.create_table(
'milestones',
@ -144,7 +178,9 @@ def upgrade(active_plugins=None, options=None):
sa.Column('undefined', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['branch_id'], ['branches.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', name='uniq_milestone_name')
sa.UniqueConstraint('name', name='uniq_milestone_name'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET
)
op.create_table(
'projects',
@ -156,7 +192,9 @@ def upgrade(active_plugins=None, options=None):
sa.Column('team_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['team_id'], ['teams.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', name='uniq_project_name')
sa.UniqueConstraint('name', name='uniq_project_name'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET
)
op.create_table(
'project_group_mapping',
@ -164,7 +202,9 @@ def upgrade(active_plugins=None, options=None):
sa.Column('project_group_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['project_group_id'], ['project_groups.id'], ),
sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ),
sa.PrimaryKeyConstraint()
sa.PrimaryKeyConstraint(),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET
)
op.create_table(
'tasks',
@ -172,8 +212,7 @@ def upgrade(active_plugins=None, options=None):
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('title', sa.Unicode(length=100), nullable=True),
sa.Column(
'status', sa.Enum('Todo', 'In review', 'Landed'), nullable=True),
sa.Column('status', enums['task_status'], nullable=True),
sa.Column('story_id', sa.Integer(), nullable=True),
sa.Column('project_id', sa.Integer(), nullable=True),
sa.Column('assignee_id', sa.Integer(), nullable=True),
@ -182,7 +221,9 @@ def upgrade(active_plugins=None, options=None):
sa.ForeignKeyConstraint(['milestone_id'], ['milestones.id'], ),
sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ),
sa.ForeignKeyConstraint(['story_id'], ['stories.id'], ),
sa.PrimaryKeyConstraint('id')
sa.PrimaryKeyConstraint('id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET
)
op.create_table(
'comments',
@ -196,7 +237,9 @@ def upgrade(active_plugins=None, options=None):
sa.Column('author_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['story_id'], ['stories.id'], ),
sa.PrimaryKeyConstraint('id')
sa.PrimaryKeyConstraint('id'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET
)
op.create_table(
'storytags',
@ -207,20 +250,31 @@ def upgrade(active_plugins=None, options=None):
sa.Column('story_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['story_id'], ['stories.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', name='uniq_story_tags_name')
sa.UniqueConstraint('name', name='uniq_story_tags_name'),
mysql_engine=MYSQL_ENGINE,
mysql_charset=MYSQL_CHARSET
)
def downgrade(active_plugins=None, options=None):
# be careful with the order, keep FKs in mind
op.drop_table('project_group_mapping')
op.drop_table('team_membership')
op.drop_table('team_permissions')
op.drop_table('user_permissions')
op.drop_table('storytags')
op.drop_table('comments')
op.drop_table('tasks')
op.drop_table('project_groups')
op.drop_table('projects')
op.drop_table('milestones')
op.drop_table('stories')
op.drop_table('team_membership')
op.drop_table('permissions')
op.drop_table('teams')
op.drop_table('users')
op.drop_table('groups')
op.drop_table('project_groups')
op.drop_table('branches')
# Need to explicitly delete enums during migrations for Postgres
enums = _define_enums()
for enum in enums.values():
enum.drop(op.get_bind())
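# Illustrative only (not part of this migration): SQLAlchemy's SchemaType
# API also supports a guarded drop, e.g.
#     enums['task_status'].drop(op.get_bind(), checkfirst=True)
# which emits DROP TYPE only when the type actually exists, so a downgrade
# stays safe to re-run on Postgres.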

View File

@ -0,0 +1,251 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import logging as stdlib_logging
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
from storyboard.openstack.common.gettextutils import _ # noqa
from storyboard.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
def __init__(self, message=None):
super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception):
def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = "Unexpected error while running command."
if exit_code is None:
exit_code = '-'
message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
% (description, cmd, exit_code, stdout, stderr))
super(ProcessExecutionError, self).__init__(message)
class NoRootWrapSpecified(Exception):
def __init__(self, message=None):
super(NoRootWrapSpecified, self).__init__(message)
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
Allows optional retry.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
:param process_input: Send to opened process.
:type process_input: string
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raises
:class:`ProcessExecutionError` unless
the program exits with one of these codes.
:type check_exit_code: boolean, int, or [int]
:param delay_on_retry: True | False. Defaults to True. If set to True,
wait a short amount of time before retrying.
:type delay_on_retry: boolean
:param attempts: How many times to retry cmd.
:type attempts: int
:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:type run_as_root: boolean
:param root_helper: command to prefix to commands called with
run_as_root=True
:type root_helper: string
:param shell: whether or not there should be a shell used to
execute this command. Defaults to false.
:type shell: boolean
:param loglevel: log level for execute commands.
:type loglevel: int. (Should be stdlib_logging.DEBUG or
stdlib_logging.INFO)
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
:raises: :class:`ProcessExecutionError`
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', False)
loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
if kwargs:
raise UnknownArgumentError(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
if not root_helper:
raise NoRootWrapSpecified(
message=('Command requested root, but did not specify a root '
'helper.'))
cmd = shlex.split(root_helper) + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
attempts -= 1
try:
LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
if _returncode:
LOG.log(loglevel, _('Result was %s') % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except ProcessExecutionError:
if not attempts:
raise
else:
LOG.log(loglevel, _('%r failed. Retrying.'), cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
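# A minimal usage sketch (hypothetical command, shown for illustration only):
#     out, err = execute('cat', '/etc/hostname', attempts=3)
# makes up to three attempts to run `cat /etc/hostname`, raising
# ProcessExecutionError if the exit code is never in the allowed list
# (which defaults to [0]).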
def trycmd(*args, **kwargs):
"""A wrapper around execute() to more easily handle warnings and errors.
Returns an (out, err) tuple of strings containing the output of
the command's stdout and stderr. If 'err' is not empty then the
command can be considered to have failed.
:param discard_warnings: True | False. Defaults to False. If set to True,
then for succeeding commands, stderr is cleared
"""
discard_warnings = kwargs.pop('discard_warnings', False)
try:
out, err = execute(*args, **kwargs)
failed = False
except ProcessExecutionError as exn:
out, err = '', str(exn)
failed = True
if not failed and discard_warnings and err:
# Handle commands that output to stderr but otherwise succeed
err = ''
return out, err
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug(_('Running cmd (SSH): %s'), cmd)
if addl_env:
raise InvalidArgumentError(_('Environment not supported over SSH'))
if process_input:
# This is (probably) fixable if we need it...
raise InvalidArgumentError(_('process_input not supported over SSH'))
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
channel = stdout_stream.channel
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
stderr = stderr_stream.read()
stdin_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
if check_exit_code and exit_status != 0:
raise ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
return (stdout, stderr)

View File

@ -19,12 +19,15 @@ import os
import fixtures
from oslo.config import cfg
from storyboard.openstack.common import log as logging
import testtools
CONF = cfg.CONF
_TRUE_VALUES = ('true', '1', 'yes')
logging.setup('storyboard')
class TestCase(testtools.TestCase):

View File

View File

@ -0,0 +1,26 @@
[unit_tests]
# Set up any number of databases to test concurrently.
# The "name" used in the test is the config variable key.
# A few tests rely on one sqlite database with 'sqlite' as the key.
sqlite=sqlite://
#sqlitefile=sqlite:///test_migrations_utils.db
#mysql=mysql+mysqldb://user:pass@localhost/test_migrations_utils
#postgresql=postgresql+psycopg2://user:pass@localhost/test_migrations_utils
[migration_dbs]
# Migration DB details are listed separately as they can't be connected to
# concurrently. These databases can't be the same as above
# Note, sqlite:// is in-memory and unique each time it is spawned.
# However, file-based sqlite databases are not unique.
#sqlite=sqlite://
#sqlitefile=sqlite:///test_migrations.db
#mysql=mysql+mysqldb://user:pass@localhost/test_migrations
#postgresql=postgresql+psycopg2://user:pass@localhost/test_migrations
[walk_style]
snake_walk=yes
downgrade=yes
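# snake_walk=yes exercises each revision in up->down->up order on the way up
# (and down->up->down on the way back down); downgrade=yes walks the whole
# history back down after the initial upgrade pass.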

View File

@ -0,0 +1,95 @@
# Copyright 2014 OpenStack Foundation
# Copyright 2014 Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations. This test case reads the configuration
file test_migrations.conf for database connection settings
to use in the tests. For each connection found in the config file,
the test case runs a series of test cases to ensure that migrations work
properly.
There are also "opportunistic" tests for both mysql and postgresql in here,
which allow testing against mysql and pg in a properly configured unit
test environment.
For the opportunistic testing you need to set up a db named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost.
The test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands:
sudo -u postgres psql
postgres=# create user openstack_citest with createdb login password
'openstack_citest';
postgres=# create database openstack_citest with owner openstack_citest;
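An equivalent MySQL setup (illustrative; the grant syntax assumes a
MySQL 5.x root login) would be:
mysql -u root -e "create database openstack_citest;"
mysql -u root -e "grant all privileges on openstack_citest.* to
'openstack_citest'@'localhost' identified by 'openstack_citest';"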
"""
from oslo.config import cfg
from storyboard.openstack.common.db.sqlalchemy import utils as db_utils
from storyboard.tests.db.migration import test_migrations_base as base
CONF = cfg.CONF
class TestMigrations(base.BaseWalkMigrationTestCase, base.CommonTestsMixIn):
"""Test sqlalchemy-migrate migrations."""
USER = "openstack_citest"
PASSWD = "openstack_citest"
DATABASE = "openstack_citest"
def __init__(self, *args, **kwargs):
super(TestMigrations, self).__init__(*args, **kwargs)
def setUp(self):
super(TestMigrations, self).setUp()
def assertColumnExists(self, engine, table, column):
t = db_utils.get_table(engine, table)
self.assertIn(column, t.c)
def assertColumnNotExists(self, engine, table, column):
t = db_utils.get_table(engine, table)
self.assertNotIn(column, t.c)
def assertIndexExists(self, engine, table, index):
t = db_utils.get_table(engine, table)
index_names = [idx.name for idx in t.indexes]
self.assertIn(index, index_names)
def assertIndexMembers(self, engine, table, index, members):
self.assertIndexExists(engine, table, index)
t = db_utils.get_table(engine, table)
index_columns = None
for idx in t.indexes:
if idx.name == index:
index_columns = idx.columns.keys()
break
self.assertEqual(sorted(members), sorted(index_columns))
def _pre_upgrade_18708bcdc0fe(self, engine):
# Anything returned from this method will be
# passed to corresponding _check_xxx method as 'data'.
pass
def _check_18708bcdc0fe(self, engine, data):
self.assertColumnExists(engine, 'users', 'created_at')
self.assertColumnExists(engine, 'users', 'last_login')
self.assertColumnExists(engine, 'teams', 'updated_at')
self.assertColumnExists(engine, 'teams', 'name')
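# Checks for future revisions follow the same convention (hypothetical
# revision id used purely for illustration):
#
# def _pre_upgrade_123abc456def(self, engine):
#     return {'seed': 'data'}  # handed to the matching _check_ as 'data'
#
# def _check_123abc456def(self, engine, data):
#     self.assertColumnExists(engine, 'some_table', 'some_column')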

View File

@ -0,0 +1,585 @@
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Ripped off from Nova's test_migrations.py
# The only difference between Nova and this code is usage of alembic instead
# of sqlalchemy migrations.
#
# There is ongoing work to extract similar code to oslo-incubator. Once it is
# extracted we'll be able to remove this file and use oslo.
import ConfigParser
import io
import os
import urlparse
from alembic import command
from alembic import config as alembic_config
from alembic import migration
from oslo.config import cfg
import sqlalchemy
import sqlalchemy.exc
import storyboard.db.migration
from storyboard.openstack.common.db.sqlalchemy import session
from storyboard.openstack.common import lockutils
from storyboard.openstack.common import log as logging
from storyboard.openstack.common import processutils
from storyboard.tests import base
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
synchronized = lockutils.synchronized_with_prefix('storyboard-')
def _get_connect_string(backend, user, passwd, database):
"""Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
if backend == "postgres":
backend = "postgresql+psycopg2"
elif backend == "mysql":
backend = "mysql+mysqldb"
else:
raise Exception("Unrecognized backend: '%s'" % backend)
return ("%s://%s:%s@localhost/%s" % (backend, user, passwd, database))
def _is_backend_avail(backend, user, passwd, database):
try:
connect_uri = _get_connect_string(backend, user, passwd, database)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
# have any backend code loaded.
return False
else:
connection.close()
engine.dispose()
return True
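# For example, _is_backend_avail('mysql', 'openstack_citest',
# 'openstack_citest', 'openstack_citest') returns True only when the
# opportunistic MySQL database described in the module docstring is up.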
def _have_mysql(user, passwd, database):
present = os.environ.get('STORYBOARD_MYSQL_PRESENT')
if present is None:
return _is_backend_avail('mysql', user, passwd, database)
return present.lower() in ('', 'true')
def _have_postgresql(user, passwd, database):
present = os.environ.get('STORYBOARD_TEST_POSTGRESQL_PRESENT')
if present is None:
return _is_backend_avail('postgres', user, passwd, database)
return present.lower() in ('', 'true')
def get_mysql_connection_info(conn_pieces):
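# Example (hypothetical URL): for mysql+mysqldb://user:pass@localhost/test
# this returns ('user', '-p"pass"', 'test', 'localhost'); the password is
# pre-formatted as a mysql CLI flag, or left empty when absent.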
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
if auth_pieces[1].strip():
password = "-p\"%s\"" % auth_pieces[1]
return (user, password, database, host)
def get_pgsql_connection_info(conn_pieces):
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
password = auth_pieces[1].strip()
return (user, password, database, host)
class CommonTestsMixIn(object):
"""These tests are shared between TestStoryboardMigrations and
TestBaremetalMigrations.
BaseMigrationTestCase is effectively an abstract class, meant to be derived
from and not directly tested against; that's why these `test_` methods need
to be on a Mixin, so that they won't be picked up as valid tests for
BaseMigrationTestCase.
"""
def test_walk_versions(self):
for key, engine in self.engines.items():
# We start each walk with a completely blank slate.
self._reset_database(key)
self._walk_versions(engine, self.snake_walk, self.downgrade)
def test_mysql_opportunistically(self):
self._test_mysql_opportunistically()
def test_mysql_connect_fail(self):
"""Test that we can trigger a mysql connection failure and we fail
gracefully to ensure we don't break people without mysql
"""
if _is_backend_avail('mysql', "openstack_cifail", self.PASSWD,
self.DATABASE):
self.fail("Shouldn't have connected")
def test_postgresql_opportunistically(self):
self._test_postgresql_opportunistically()
def test_postgresql_connect_fail(self):
"""Test that we can trigger a postgres connection failure and we fail
gracefully to ensure we don't break people without postgres
"""
if _is_backend_avail('postgres', "openstack_cifail", self.PASSWD,
self.DATABASE):
self.fail("Shouldn't have connected")
class BaseMigrationTestCase(base.TestCase):
"""Base class for testing migrations and migration utils. This sets up
and configures the databases to run tests against.
"""
# NOTE(jhesketh): It is expected that tests clean up after themselves.
# This is necessary for concurrency to allow multiple tests to work on
# one database.
# The full migration walk tests however do call the old _reset_databases()
# to throw away whatever was there so they need to operate on their own
# database that we know isn't accessed concurrently.
# Hence, BaseWalkMigrationTestCase overwrites the engine list.
USER = None
PASSWD = None
DATABASE = None
TIMEOUT_SCALING_FACTOR = 2
def __init__(self, *args, **kwargs):
super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
self.DEFAULT_CONFIG_FILE = os.path.join(
os.path.dirname(__file__),
'test_migrations.conf')
# Test machines can set the STORYBOARD_TEST_MIGRATIONS_CONF variable
# to override the location of the config file for migration testing
self.CONFIG_FILE_PATH = os.environ.get(
'STORYBOARD_TEST_MIGRATIONS_CONF',
self.DEFAULT_CONFIG_FILE)
self.ALEMBIC_CONFIG = alembic_config.Config(
os.path.join(os.path.dirname(storyboard.db.migration.__file__),
'alembic.ini')
)
self.ALEMBIC_CONFIG.storyboard_config = CONF
self.snake_walk = False
self.downgrade = False
self.test_databases = {}
self.migration = None
self.migration_api = None
def setUp(self):
super(BaseMigrationTestCase, self).setUp()
self._load_config()
def _load_config(self):
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
if os.path.exists(self.CONFIG_FILE_PATH):
cp = ConfigParser.RawConfigParser()
try:
cp.read(self.CONFIG_FILE_PATH)
config = cp.options('unit_tests')
for key in config:
self.test_databases[key] = cp.get('unit_tests', key)
self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
self.downgrade = cp.getboolean('walk_style', 'downgrade')
except ConfigParser.ParsingError as e:
self.fail("Failed to read test_migrations.conf config "
"file. Got error: %s" % e)
else:
self.fail("Failed to find test_migrations.conf config "
"file.")
self.engines = {}
for key, value in self.test_databases.items():
self.engines[key] = sqlalchemy.create_engine(value)
# NOTE(jhesketh): We only need to make sure the databases are created
# not necessarily clean of tables.
self._create_databases()
def execute_cmd(self, cmd=None):
out, err = processutils.trycmd(cmd, shell=True, discard_warnings=True)
output = out or err
LOG.debug(output)
self.assertEqual('', err,
"Failed to run: %s\n%s" % (cmd, output))
@synchronized('pgadmin', external=True, lock_path='/tmp')
def _reset_pg(self, conn_pieces):
(user, password, database, host) = \
get_pgsql_connection_info(conn_pieces)
os.environ['PGPASSWORD'] = password
os.environ['PGUSER'] = user
# note(boris-42): We must create and drop database, we can't
# drop database which we have connected to, so for such
# operations there is a special database template1.
sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
" '%(sql)s' -d template1")
sqldict = {'user': user, 'host': host}
sqldict['sql'] = ("drop database if exists %s;") % database
droptable = sqlcmd % sqldict
self.execute_cmd(droptable)
sqldict['sql'] = ("create database %s;") % database
createtable = sqlcmd % sqldict
self.execute_cmd(createtable)
os.unsetenv('PGPASSWORD')
os.unsetenv('PGUSER')
@synchronized('mysql', external=True, lock_path='/tmp')
def _reset_mysql(self, conn_pieces):
# We can execute the MySQL client to destroy and re-create
# the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me.
(user, password, database, host) = \
get_mysql_connection_info(conn_pieces)
sql = ("drop database if exists %(database)s; "
"create database %(database)s;" % {'database': database})
cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
"-e \"%(sql)s\"" % {'user': user, 'password': password,
'host': host, 'sql': sql})
self.execute_cmd(cmd)
@synchronized('sqlite', external=True, lock_path='/tmp')
def _reset_sqlite(self, conn_pieces):
# We can just delete the SQLite database, which is
# the easiest and cleanest solution
db_path = conn_pieces.path.strip('/')
if os.path.exists(db_path):
os.unlink(db_path)
# No need to recreate the SQLite DB. SQLite will
# create it for us if it's not there...
def _create_databases(self):
"""Create all configured databases as needed."""
for key, engine in self.engines.items():
self._create_database(key)
def _create_database(self, key):
"""Create database if it doesn't exist."""
conn_string = self.test_databases[key]
conn_pieces = urlparse.urlparse(conn_string)
if conn_string.startswith('mysql'):
(user, password, database, host) = \
get_mysql_connection_info(conn_pieces)
sql = "create database if not exists %s;" % database
cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
"-e \"%(sql)s\"" % {'user': user, 'password': password,
'host': host, 'sql': sql})
self.execute_cmd(cmd)
elif conn_string.startswith('postgresql'):
(user, password, database, host) = \
get_pgsql_connection_info(conn_pieces)
os.environ['PGPASSWORD'] = password
os.environ['PGUSER'] = user
sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
" '%(sql)s' -d template1")
sql = ("create database if not exists %s;") % database
createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
# 0 means the database was created
# 256 means it already exists (which is fine)
# otherwise raise an error
out, err = processutils.trycmd(createtable, shell=True,
check_exit_code=[0, 256],
discard_warnings=True)
output = out or err
if err != '':
self.fail("Failed to run: %s\n%s" % (createtable, output))
os.unsetenv('PGPASSWORD')
os.unsetenv('PGUSER')
def _reset_databases(self):
"""Reset all configured databases."""
for key, engine in self.engines.items():
self._reset_database(key)
def _reset_database(self, key):
"""Reset specific database."""
engine = self.engines[key]
conn_string = self.test_databases[key]
conn_pieces = urlparse.urlparse(conn_string)
engine.dispose()
if conn_string.startswith('sqlite'):
self._reset_sqlite(conn_pieces)
elif conn_string.startswith('mysql'):
self._reset_mysql(conn_pieces)
elif conn_string.startswith('postgresql'):
self._reset_pg(conn_pieces)
class BaseWalkMigrationTestCase(BaseMigrationTestCase):
"""BaseWalkMigrationTestCase loads in an alternative set of databases for
testing against. This is necessary as the default databases can run tests
concurrently without interfering with each other. It is expected that
databases listed under [migration_dbs] in the configuration are only being
accessed by one test at a time. Currently only test_walk_versions accesses
the databases (and is the only method that calls _reset_database() which
is clearly problematic for concurrency).
"""
def _load_config(self):
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
if os.path.exists(self.CONFIG_FILE_PATH):
cp = ConfigParser.RawConfigParser()
try:
cp.read(self.CONFIG_FILE_PATH)
config = cp.options('migration_dbs')
for key in config:
self.test_databases[key] = cp.get('migration_dbs', key)
self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
self.downgrade = cp.getboolean('walk_style', 'downgrade')
except ConfigParser.ParsingError as e:
self.fail("Failed to read test_migrations.conf config "
"file. Got error: %s" % e)
else:
self.fail("Failed to find test_migrations.conf config "
"file.")
self.engines = {}
for key, value in self.test_databases.items():
self.engines[key] = sqlalchemy.create_engine(value)
self._create_databases()
def _configure(self, engine):
"""For each type of repository we should do some of configure steps.
For migrate_repo we should set under version control our database.
For alembic we should configure database settings. For this goal we
should use oslo.config and openstack.commom.db.sqlalchemy.session with
database functionality (reset default settings and session cleanup).
"""
CONF.set_override('connection', str(engine.url), group='database')
session.cleanup()
def _test_mysql_opportunistically(self):
# Test that table creation on mysql only builds InnoDB tables
if not _have_mysql(self.USER, self.PASSWD, self.DATABASE):
self.skipTest("mysql not available")
# add this to the global lists to make reset work with it, it's removed
# automatically in tearDown so no need to clean it up here.
connect_string = _get_connect_string("mysql", self.USER, self.PASSWD,
self.DATABASE)
(user, password, database, host) = \
get_mysql_connection_info(urlparse.urlparse(connect_string))
engine = sqlalchemy.create_engine(connect_string)
self.engines[database] = engine
self.test_databases[database] = connect_string
# build a fully populated mysql database with all the tables
self._reset_database(database)
self._walk_versions(engine, self.snake_walk, self.downgrade)
connection = engine.connect()
# sanity check
total = connection.execute("SELECT count(*) "
"from information_schema.TABLES "
"where TABLE_SCHEMA='%(database)s'" %
{'database': database})
self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
connection.close()
del(self.engines[database])
del(self.test_databases[database])
def _test_postgresql_opportunistically(self):
# Test postgresql database migration walk
if not _have_postgresql(self.USER, self.PASSWD, self.DATABASE):
self.skipTest("postgresql not available")
# add this to the global lists to make reset work with it, it's removed
# automatically in tearDown so no need to clean it up here.
connect_string = _get_connect_string("postgres", self.USER,
self.PASSWD, self.DATABASE)
engine = sqlalchemy.create_engine(connect_string)
(user, password, database, host) = \
get_pgsql_connection_info(urlparse.urlparse(connect_string))
self.engines[database] = engine
self.test_databases[database] = connect_string
# build a fully populated postgresql database with all the tables
self._reset_database(database)
self._walk_versions(engine, self.snake_walk, self.downgrade)
del(self.engines[database])
del(self.test_databases[database])
def _alembic_command(self, alembic_command, engine, *args, **kwargs):
"""Most of alembic command return data into output.
We should redefine this setting for getting info.
"""
self.ALEMBIC_CONFIG.stdout = buf = io.StringIO()
CONF.set_override('connection', str(engine.url), group='database')
session.cleanup()
getattr(command, alembic_command)(*args, **kwargs)
res = buf.getvalue().strip()
LOG.debug('Alembic command `%s` returns: %s' % (alembic_command, res))
session.cleanup()
return res
def _get_alembic_versions(self, engine):
"""For support of full testing of migrations
we should have an opportunity to run command step by step for each
version in repo. This method returns list of alembic_versions by
historical order.
"""
full_history = self._alembic_command('history',
engine, self.ALEMBIC_CONFIG)
# The piece of output data with version can looked as:
# 'Rev: 17738166b91 (head)' or 'Rev: 43b1a023dfaa'
alembic_history = [r.split(' ')[1] for r in full_history.split("\n")
if r.startswith("Rev")]
alembic_history.reverse()
return alembic_history
def _up_and_down_versions(self, engine):
"""Since alembic version has a random algoritm of generation
(SA-migrate has an ordered autoincrement naming) we should store
a tuple of versions (version for upgrade and version for downgrade)
for successfull testing of migrations in up>down>up mode.
"""
versions = self._get_alembic_versions(engine)
return zip(versions, ['-1'] + versions)
def _walk_versions(self, engine=None, snake_walk=False,
downgrade=True):
# Determine latest version script from the repo, then
# upgrade from 1 through to the latest, with no data
# in the databases. This just checks that the schema itself
# upgrades successfully.
self._configure(engine)
up_and_down_versions = self._up_and_down_versions(engine)
for ver_up, ver_down in up_and_down_versions:
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, ver_up, with_data=True)
if snake_walk:
downgraded = self._migrate_down(engine,
ver_down,
with_data=True,
next_version=ver_up)
if downgraded:
self._migrate_up(engine, ver_up)
if downgrade:
# Now walk it back down to 0 from the latest, testing
# the downgrade paths.
up_and_down_versions.reverse()
for ver_up, ver_down in up_and_down_versions:
# downgrade -> upgrade -> downgrade
downgraded = self._migrate_down(engine,
ver_down, next_version=ver_up)
if snake_walk and downgraded:
self._migrate_up(engine, ver_up)
self._migrate_down(engine, ver_down, next_version=ver_up)
def _get_version_from_db(self, engine):
"""For each type of migrate repo latest version from db
will be returned.
"""
conn = engine.connect()
try:
context = migration.MigrationContext.configure(conn)
version = context.get_current_revision() or '-1'
finally:
conn.close()
return version
def _migrate(self, engine, version, cmd):
"""Base method for manipulation with migrate repo.
It will upgrade or downgrade the actual database.
"""
self._alembic_command(cmd, engine, self.ALEMBIC_CONFIG, version)
def _migrate_down(self, engine, version, with_data=False,
next_version=None):
try:
self._migrate(engine, version, 'downgrade')
except NotImplementedError:
# NOTE(sirp): some migrations, namely release-level
# migrations, don't support a downgrade.
return False
self.assertEqual(version, self._get_version_from_db(engine))
# NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
# version). So if we have any downgrade checks, they need to be run for
# the previous (higher numbered) migration.
if with_data:
post_downgrade = getattr(
self, "_post_downgrade_%s" % next_version, None)
if post_downgrade:
post_downgrade(engine)
return True
def _migrate_up(self, engine, version, with_data=False):
"""migrate up to a new version of the db.
We allow for data insertion and post checks at every
migration version with special _pre_upgrade_### and
_check_### functions in the main test.
"""
# NOTE(sdague): try block is here because it's impossible to debug
# where a failed data migration happens otherwise
check_version = version
try:
if with_data:
data = None
pre_upgrade = getattr(
self, "_pre_upgrade_%s" % check_version, None)
if pre_upgrade:
data = pre_upgrade(engine)
self._migrate(engine, version, 'upgrade')
self.assertEqual(version, self._get_version_from_db(engine))
if with_data:
check = getattr(self, "_check_%s" % check_version, None)
if check:
check(engine, data)
except Exception:
LOG.error("Failed to migrate to version %s on engine %s" %
(version, engine))
raise

View File

@ -13,3 +13,10 @@ testtools>=0.9.32
# Doc requirements
sphinx>=1.1.2,<1.2
sphinxcontrib-pecanwsme>=0.5
eventlet>=0.13.0
MySQL-python
psycopg2
# openstack/common/db/sqlalchemy/utils.py depends on this
# we don't use migrate framework in storyboard
sqlalchemy-migrate>=0.8.2