# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
|
|
# Copyright 2010-2011 OpenStack, LLC
|
|
# All Rights Reserved.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
# not use this file except in compliance with the License. You may obtain
|
|
# a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
# License for the specific language governing permissions and limitations
|
|
# under the License.
|
|
|
|
"""
|
|
Tests for database migrations. This test case reads the configuration
|
|
file test_migrations.conf for database connection settings
|
|
to use in the tests. For each connection found in the config file,
|
|
the test case runs a series of test cases to ensure that migrations work
|
|
properly both upgrading and downgrading, and that no data loss occurs
|
|
if possible.
|
|
"""
|
|
|
|
import commands
|
|
import ConfigParser
|
|
import os
|
|
import urlparse
|
|
|
|
from migrate.versioning import repository
|
|
import sqlalchemy
|
|
|
|
import cinder.db.migration as migration
|
|
import cinder.db.sqlalchemy.migrate_repo
|
|
from cinder.db.sqlalchemy.migration import versioning_api as migration_api
|
|
from cinder.openstack.common import log as logging
|
|
from cinder import test
|
|
|
|
LOG = logging.getLogger('cinder.tests.test_migrations')
|
|
|
|
|
|
def _mysql_get_connect_string(user="openstack_citest",
|
|
passwd="openstack_citest",
|
|
database="openstack_citest"):
|
|
"""
|
|
Try to get a connection with a very specfic set of values, if we get
|
|
these then we'll run the mysql tests, otherwise they are skipped
|
|
"""
|
|
return "mysql://%(user)s:%(passwd)s@localhost/%(database)s" % locals()
|
|
|
|
|
|
def _is_mysql_avail(user="openstack_citest",
|
|
passwd="openstack_citest",
|
|
database="openstack_citest"):
|
|
try:
|
|
connect_uri = _mysql_get_connect_string(
|
|
user=user, passwd=passwd, database=database)
|
|
engine = sqlalchemy.create_engine(connect_uri)
|
|
connection = engine.connect()
|
|
except Exception:
|
|
# intential catch all to handle exceptions even if we don't
|
|
# have mysql code loaded at all.
|
|
return False
|
|
else:
|
|
connection.close()
|
|
return True
|
|
|
|
|
|
def _have_mysql():
|
|
present = os.environ.get('NOVA_TEST_MYSQL_PRESENT')
|
|
if present is None:
|
|
return _is_mysql_avail()
|
|
return present.lower() in ('', 'true')
|
|
|
|
|
|
class TestMigrations(test.TestCase):
    """Test sqlalchemy-migrate migrations."""

    # Connection strings loaded once from the config file, keyed by name.
    TEST_DATABASES = {}
    DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                       'test_migrations.conf')
    # Test machines can set the CINDER_TEST_MIGRATIONS_CONF variable
    # to override the location of the config file for migration testing
    CONFIG_FILE_PATH = os.environ.get('CINDER_TEST_MIGRATIONS_CONF',
                                      DEFAULT_CONFIG_FILE)
    # The migrate repository lives next to the migrate_repo package file.
    MIGRATE_FILE = cinder.db.sqlalchemy.migrate_repo.__file__
    REPOSITORY = repository.Repository(
        os.path.abspath(os.path.dirname(MIGRATE_FILE)))

    def setUp(self):
        """Read the config, build an engine per database, wipe them clean."""
        super(TestMigrations, self).setUp()

        self.snake_walk = False

        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH)
        if not TestMigrations.TEST_DATABASES:
            if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
                cp = ConfigParser.RawConfigParser()
                try:
                    cp.read(TestMigrations.CONFIG_FILE_PATH)
                    # Every entry in [DEFAULT] is treated as a connection
                    # string to test against.
                    defaults = cp.defaults()
                    for key, value in defaults.items():
                        TestMigrations.TEST_DATABASES[key] = value
                    self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
                except ConfigParser.ParsingError, e:
                    self.fail("Failed to read test_migrations.conf config "
                              "file. Got error: %s" % e)
            else:
                self.fail("Failed to find test_migrations.conf config "
                          "file.")

        self.engines = {}
        for key, value in TestMigrations.TEST_DATABASES.items():
            self.engines[key] = sqlalchemy.create_engine(value)

        # We start each test case with a completely blank slate.
        self._reset_databases()

    def tearDown(self):
        """Destroy the test databases and drop the ad-hoc mysql entries."""

        # We destroy the test data store between each test case,
        # and recreate it, which ensures that we have no side-effects
        # from the tests
        self._reset_databases()

        # remove these from the list so they aren't used in the migration tests
        # (test_mysql_innodb registers "mysqlcitest" on the class, so it must
        # be cleaned out here to avoid leaking into later test cases)
        if "mysqlcitest" in self.engines:
            del self.engines["mysqlcitest"]
        if "mysqlcitest" in TestMigrations.TEST_DATABASES:
            del TestMigrations.TEST_DATABASES["mysqlcitest"]
        super(TestMigrations, self).tearDown()

    def _reset_databases(self):
        """Drop and recreate every configured test database.

        Uses shell clients (mysql/psql) rather than SQLAlchemy for the
        server-backed databases; sqlite files are simply deleted.
        """
        def execute_cmd(cmd=None):
            # Run a shell command and fail the test on non-zero exit.
            status, output = commands.getstatusoutput(cmd)
            LOG.debug(output)
            self.assertEqual(0, status)
        for key, engine in self.engines.items():
            conn_string = TestMigrations.TEST_DATABASES[key]
            conn_pieces = urlparse.urlparse(conn_string)
            if conn_string.startswith('sqlite'):
                # We can just delete the SQLite database, which is
                # the easiest and cleanest solution
                db_path = conn_pieces.path.strip('/')
                if os.path.exists(db_path):
                    os.unlink(db_path)
                # No need to recreate the SQLite DB. SQLite will
                # create it for us if it's not there...
            elif conn_string.startswith('mysql'):
                # We can execute the MySQL client to destroy and re-create
                # the MYSQL database, which is easier and less error-prone
                # than using SQLAlchemy to do this via MetaData...trust me.
                database = conn_pieces.path.strip('/')
                loc_pieces = conn_pieces.netloc.split('@')
                host = loc_pieces[1]
                auth_pieces = loc_pieces[0].split(':')
                user = auth_pieces[0]
                password = ""
                if len(auth_pieces) > 1:
                    if auth_pieces[1].strip():
                        # Becomes the full -p"..." client option, not just
                        # the raw password.
                        password = "-p\"%s\"" % auth_pieces[1]
                sql = ("drop database if exists %(database)s; "
                       "create database %(database)s;") % locals()
                cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
                       "-e \"%(sql)s\"") % locals()
                execute_cmd(cmd)
            elif conn_string.startswith('postgresql'):
                database = conn_pieces.path.strip('/')
                loc_pieces = conn_pieces.netloc.split('@')
                host = loc_pieces[1]
                auth_pieces = loc_pieces[0].split(':')
                user = auth_pieces[0]
                password = ""
                if len(auth_pieces) > 1:
                    if auth_pieces[1].strip():
                        password = auth_pieces[1]
                # Write the credentials into ~/.pgpass so the psql calls
                # below can authenticate non-interactively.
                cmd = ("touch ~/.pgpass;"
                       "chmod 0600 ~/.pgpass;"
                       "sed -i -e"
                       "'1{s/^.*$/\*:\*:\*:%(user)s:%(password)s/};"
                       "1!d' ~/.pgpass") % locals()
                execute_cmd(cmd)
                # Block new connections, then terminate existing backends,
                # so the subsequent "drop database" cannot be refused.
                sql = ("UPDATE pg_catalog.pg_database SET datallowconn=false "
                       "WHERE datname='%(database)s';") % locals()
                cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
                execute_cmd(cmd)
                sql = ("SELECT pg_catalog.pg_terminate_backend(procpid) "
                       "FROM pg_catalog.pg_stat_activity "
                       "WHERE datname='%(database)s';") % locals()
                cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
                execute_cmd(cmd)
                sql = ("drop database if exists %(database)s;") % locals()
                cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
                execute_cmd(cmd)
                sql = ("create database %(database)s;") % locals()
                cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
                execute_cmd(cmd)

    def test_walk_versions(self):
        """
        Walks all version scripts for each tested database, ensuring
        that there are no errors in the version scripts for each engine
        """
        for key, engine in self.engines.items():
            self._walk_versions(engine, self.snake_walk)

    def test_mysql_connect_fail(self):
        """
        Test that we can trigger a mysql connection failure and we fail
        gracefully to ensure we don't break people without mysql
        """
        if _is_mysql_avail(user="openstack_cifail"):
            self.fail("Shouldn't have connected")

    @test.skip_unless(_have_mysql(), "mysql not available")
    def test_mysql_innodb(self):
        """
        Test that table creation on mysql only builds InnoDB tables
        """
        # add this to the global lists to make reset work with it, it's removed
        # automatically in tearDown so no need to clean it up here.
        connect_string = _mysql_get_connect_string()
        engine = sqlalchemy.create_engine(connect_string)
        self.engines["mysqlcitest"] = engine
        TestMigrations.TEST_DATABASES["mysqlcitest"] = connect_string

        # build a fully populated mysql database with all the tables
        self._reset_databases()
        self._walk_versions(engine, False, False)

        uri = _mysql_get_connect_string(database="information_schema")
        connection = sqlalchemy.create_engine(uri).connect()

        # sanity check
        total = connection.execute("SELECT count(*) "
                                   "from information_schema.TABLES "
                                   "where TABLE_SCHEMA='openstack_citest'")
        self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")

        # migrate_version is the bookkeeping table created by
        # sqlalchemy-migrate itself, so it is exempt from the InnoDB check.
        noninnodb = connection.execute("SELECT count(*) "
                                       "from information_schema.TABLES "
                                       "where TABLE_SCHEMA='openstack_citest' "
                                       "and ENGINE!='InnoDB' "
                                       "and TABLE_NAME!='migrate_version'")
        count = noninnodb.scalar()
        self.assertEqual(count, 0, "%d non InnoDB tables created" % count)

    def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
        """Step through every migration, optionally zig-zagging and reversing.

        With snake_walk each upgrade is followed by a downgrade and
        re-upgrade (and vice versa on the way down), exercising both
        directions of every script.
        """
        # Determine latest version script from the repo, then
        # upgrade from 1 through to the latest, with no data
        # in the databases. This just checks that the schema itself
        # upgrades successfully.

        # Place the database under version control
        migration_api.version_control(engine,
                                      TestMigrations.REPOSITORY,
                                      migration.INIT_VERSION)
        self.assertEqual(migration.INIT_VERSION,
                         migration_api.db_version(engine,
                                                  TestMigrations.REPOSITORY))

        migration_api.upgrade(engine, TestMigrations.REPOSITORY,
                              migration.INIT_VERSION + 1)

        LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest)

        for version in xrange(migration.INIT_VERSION + 2,
                              TestMigrations.REPOSITORY.latest + 1):
            # upgrade -> downgrade -> upgrade
            self._migrate_up(engine, version)
            if snake_walk:
                self._migrate_down(engine, version - 1)
                self._migrate_up(engine, version)

        if downgrade:
            # Now walk it back down to 0 from the latest, testing
            # the downgrade paths.
            for version in reversed(
                xrange(migration.INIT_VERSION + 1,
                       TestMigrations.REPOSITORY.latest)):
                # downgrade -> upgrade -> downgrade
                self._migrate_down(engine, version)
                if snake_walk:
                    self._migrate_up(engine, version + 1)
                    self._migrate_down(engine, version)

    def _migrate_down(self, engine, version):
        """Downgrade to ``version`` and assert the repo agrees we got there."""
        migration_api.downgrade(engine,
                                TestMigrations.REPOSITORY,
                                version)
        self.assertEqual(version,
                         migration_api.db_version(engine,
                                                  TestMigrations.REPOSITORY))

    def _migrate_up(self, engine, version):
        """Upgrade to ``version`` and assert the repo agrees we got there."""
        migration_api.upgrade(engine,
                              TestMigrations.REPOSITORY,
                              version)
        self.assertEqual(version,
                         migration_api.db_version(engine,
                                                  TestMigrations.REPOSITORY))

    def test_migration_004(self):
        """Test that volume_type_id migration works correctly."""
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine,
                                          TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 3)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 4)
            volumes = sqlalchemy.Table('volumes',
                                       metadata,
                                       autoload=True)
            volume_types = sqlalchemy.Table('volume_types',
                                            metadata,
                                            autoload=True)
            extra_specs = sqlalchemy.Table('volume_type_extra_specs',
                                           metadata,
                                           autoload=True)

            # Migration 4 converts the type-id columns to VARCHAR (UUIDs).
            self.assertTrue(isinstance(volumes.c.volume_type_id.type,
                                       sqlalchemy.types.VARCHAR))
            self.assertTrue(isinstance(volume_types.c.id.type,
                                       sqlalchemy.types.VARCHAR))
            self.assertTrue(isinstance(extra_specs.c.volume_type_id.type,
                                       sqlalchemy.types.VARCHAR))

            self.assertTrue(extra_specs.c.volume_type_id.foreign_keys)

    def test_migration_005(self):
        """Test that adding source_volid column works correctly."""
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine,
                                          TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 4)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 5)
            volumes = sqlalchemy.Table('volumes',
                                       metadata,
                                       autoload=True)
            self.assertTrue(isinstance(volumes.c.source_volid.type,
                                       sqlalchemy.types.VARCHAR))

    def _metadatas(self, upgrade_to, downgrade_to=None):
        """Yield a reflected MetaData per engine at the requested revision.

        Upgrades each engine to ``upgrade_to`` and, when ``downgrade_to``
        is given, downgrades back to it before yielding.
        """
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine,
                                          TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine,
                                  TestMigrations.REPOSITORY,
                                  upgrade_to)

            if downgrade_to is not None:
                migration_api.downgrade(
                    engine, TestMigrations.REPOSITORY, downgrade_to)

            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine
            yield metadata

    def metadatas_upgraded_to(self, revision):
        # Convenience wrapper: schemas as they look after upgrading.
        return self._metadatas(revision)

    def metadatas_downgraded_from(self, revision):
        # Convenience wrapper: schemas after upgrading to ``revision``
        # and then downgrading one step.
        return self._metadatas(revision, revision - 1)

    def test_upgrade_006_adds_provider_location(self):
        # Migration 6 adds snapshots.provider_location as a VARCHAR.
        for metadata in self.metadatas_upgraded_to(6):
            snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
            self.assertTrue(isinstance(snapshots.c.provider_location.type,
                                       sqlalchemy.types.VARCHAR))

    def test_downgrade_006_removes_provider_location(self):
        # Downgrading migration 6 must drop the column again.
        for metadata in self.metadatas_downgraded_from(6):
            snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)

            self.assertTrue('provider_location' not in snapshots.c)

    def test_upgrade_007_adds_fk(self):
        # Migration 7 adds a foreign key from snapshots.volume_id
        # to volumes.id.
        for metadata in self.metadatas_upgraded_to(7):
            snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)

            # Exactly one FK expected; unpacking asserts that implicitly.
            fkey, = snapshots.c.volume_id.foreign_keys

            self.assertEquals(volumes.c.id, fkey.column)

    def test_downgrade_007_removes_fk(self):
        # Downgrading migration 7 must remove the foreign key.
        for metadata in self.metadatas_downgraded_from(7):
            snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)

            self.assertEquals(0, len(snapshots.c.volume_id.foreign_keys))
|