Add common code from Oslo for DB migration tests

Moved common code for DB migration tests from Oslo.

Partially implements blueprint db-migration-tests

Change-Id: Icf8c27a40ccc0114e6001cfa64444ca1cdb132fa
parent d1b78db89a
commit 4614fb4d57
ironic/openstack/common/fixture/__init__.py (new file, empty)
ironic/openstack/common/fixture/mockpatch.py (new file, +51 lines)
@@ -0,0 +1,51 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
import mock


class PatchObject(fixtures.Fixture):
    """Deal with code around mock."""

    def __init__(self, obj, attr, **kwargs):
        self.obj = obj
        self.attr = attr
        self.kwargs = kwargs

    def setUp(self):
        super(PatchObject, self).setUp()
        _p = mock.patch.object(self.obj, self.attr, **self.kwargs)
        self.mock = _p.start()
        self.addCleanup(_p.stop)


class Patch(fixtures.Fixture):

    """Deal with code around mock.patch."""

    def __init__(self, obj, **kwargs):
        self.obj = obj
        self.kwargs = kwargs

    def setUp(self):
        super(Patch, self).setUp()
        _p = mock.patch(self.obj, **self.kwargs)
        self.mock = _p.start()
        self.addCleanup(_p.stop)
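These two fixtures wrap mock.patch.object and mock.patch so that the patch is started in setUp() and stopped automatically by the fixture's cleanups. A minimal usage sketch, assuming a test written with testtools; the Foo class and test names are illustrative, not part of this change:

import testtools

from ironic.openstack.common.fixture import mockpatch


class Foo(object):
    def bar(self):
        return 'real'


class TestFooBar(testtools.TestCase):
    def test_patch_object(self):
        # useFixture() runs the fixture's setUp(), which starts the patch and
        # registers its stop as a cleanup, so the patch never leaks.
        fixture = self.useFixture(
            mockpatch.PatchObject(Foo, 'bar', return_value='fake'))
        self.assertEqual('fake', Foo().bar())
        # The started mock is exposed as .mock for assertions.
        self.assertTrue(fixture.mock.called)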
ironic/openstack/common/fixture/moxstubout.py (new file, +37 lines)
@@ -0,0 +1,37 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
import mox
import stubout


class MoxStubout(fixtures.Fixture):
    """Deal with code around mox and stubout as a fixture."""

    def setUp(self):
        super(MoxStubout, self).setUp()
        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.stubs.UnsetAll)
        self.addCleanup(self.stubs.SmartUnsetAll)
        self.addCleanup(self.mox.VerifyAll)
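MoxStubout bundles a mox.Mox and a stubout.StubOutForTesting instance and registers UnsetStubs/VerifyAll as cleanups; ironic/tests/utils.py below consumes it exactly this way. A hedged sketch of direct use in a test — the FakeDriver class and method names are made up for illustration:

import testtools

from ironic.openstack.common.fixture import moxstubout


class FakeDriver(object):
    def power_on(self):
        return 'real power on'


class TestWithMox(testtools.TestCase):
    def test_power_on_stubbed(self):
        fixture = self.useFixture(moxstubout.MoxStubout())
        driver = FakeDriver()
        # Record phase: declare the expected call and its return value.
        fixture.mox.StubOutWithMock(driver, 'power_on')
        driver.power_on().AndReturn('fake power on')
        fixture.mox.ReplayAll()
        # Replay phase: the stub returns the canned value.
        self.assertEqual('fake power on', driver.power_on())
        # UnsetStubs/VerifyAll run automatically as fixture cleanups.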
ironic/tests/db/sqlalchemy/__init__.py (new file, +16 lines)
@@ -0,0 +1,16 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Cloudscaling Group, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
ironic/tests/db/sqlalchemy/test_migrations.conf (new file, +7 lines)
@@ -0,0 +1,7 @@
[DEFAULT]
# Set up any number of migration data stores you want; one is enough.
# The "name" used in the test is the config variable key.
#sqlite=sqlite:///test_migrations.db
sqlite=sqlite://
#mysql=mysql://root:@localhost/test_migrations
#postgresql=postgresql://user:pass@localhost/test_migrations
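Each key in the [DEFAULT] section above names one test database: BaseMigrationTestCase in test_migrations.py (below) reads this file with ConfigParser, treats every key as an engine name, and builds a SQLAlchemy engine from the URL; the TEST_MIGRATIONS_CONF environment variable can point the tests at an alternative copy of the file. A small sketch of that lookup, assuming the file above is saved as test_migrations.conf in the working directory:

import ConfigParser

cp = ConfigParser.RawConfigParser()
cp.read('test_migrations.conf')
# defaults() returns the [DEFAULT] items, e.g. {'sqlite': 'sqlite://'};
# uncommenting the mysql/postgresql URLs adds those backends as well.
for name, url in cp.defaults().items():
    print '%s -> %s' % (name, url)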
ironic/tests/db/sqlalchemy/test_migrations.py (new file, +426 lines)
@@ -0,0 +1,426 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import commands
import ConfigParser
import os
import urlparse

import mock
import sqlalchemy
import sqlalchemy.exc

from ironic.openstack.common import lockutils
from ironic.openstack.common import log as logging

from ironic.tests import utils as test_utils

LOG = logging.getLogger(__name__)


def _get_connect_string(backend, user, passwd, database):
    """Get database connection

    Try to get a connection with a very specific set of values, if we get
    these then we'll run the tests, otherwise they are skipped
    """
    if backend == "postgres":
        backend = "postgresql+psycopg2"
    elif backend == "mysql":
        backend = "mysql+mysqldb"
    else:
        raise Exception("Unrecognized backend: '%s'" % backend)

    return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
            % {'backend': backend, 'user': user, 'passwd': passwd,
               'database': database})


def _is_backend_avail(backend, user, passwd, database):
    try:
        connect_uri = _get_connect_string(backend, user, passwd, database)
        engine = sqlalchemy.create_engine(connect_uri)
        connection = engine.connect()
    except Exception:
        # intentionally catch all to handle exceptions even if we don't
        # have any backend code loaded.
        return False
    else:
        connection.close()
        engine.dispose()
        return True


def _have_mysql(user, passwd, database):
    present = os.environ.get('TEST_MYSQL_PRESENT')
    if present is None:
        return _is_backend_avail('mysql', user, passwd, database)
    return present.lower() in ('', 'true')


def _have_postgresql(user, passwd, database):
    present = os.environ.get('TEST_POSTGRESQL_PRESENT')
    if present is None:
        return _is_backend_avail('postgres', user, passwd, database)
    return present.lower() in ('', 'true')


def get_db_connection_info(conn_pieces):
    database = conn_pieces.path.strip('/')
    loc_pieces = conn_pieces.netloc.split('@')
    host = loc_pieces[1]

    auth_pieces = loc_pieces[0].split(':')
    user = auth_pieces[0]
    password = ""
    if len(auth_pieces) > 1:
        password = auth_pieces[1].strip()

    return (user, password, database, host)


class BaseMigrationTestCase(test_utils.BaseTestCase):
    """Base class for testing of migration utils."""

    def __init__(self, *args, **kwargs):
        super(BaseMigrationTestCase, self).__init__(*args, **kwargs)

        self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                                'test_migrations.conf')
        # Test machines can set the TEST_MIGRATIONS_CONF variable
        # to override the location of the config file for migration testing
        self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF',
                                               self.DEFAULT_CONFIG_FILE)
        self.test_databases = {}
        self.migration_api = None

    def setUp(self):
        super(BaseMigrationTestCase, self).setUp()

        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
        if os.path.exists(self.CONFIG_FILE_PATH):
            cp = ConfigParser.RawConfigParser()
            try:
                cp.read(self.CONFIG_FILE_PATH)
                defaults = cp.defaults()
                for key, value in defaults.items():
                    self.test_databases[key] = value
            except ConfigParser.ParsingError as e:
                self.fail("Failed to read test_migrations.conf config "
                          "file. Got error: %s" % e)
        else:
            self.fail("Failed to find test_migrations.conf config "
                      "file.")

        self.engines = {}
        for key, value in self.test_databases.items():
            self.engines[key] = sqlalchemy.create_engine(value)

        # We start each test case with a completely blank slate.
        self._reset_databases()

    def tearDown(self):
        # We destroy the test data store between each test case,
        # and recreate it, which ensures that we have no side-effects
        # from the tests
        self._reset_databases()
        super(BaseMigrationTestCase, self).tearDown()

    def execute_cmd(self, cmd=None):
        status, output = commands.getstatusoutput(cmd)
        LOG.debug(output)
        self.assertEqual(0, status,
                         "Failed to run: %s\n%s" % (cmd, output))

    @lockutils.synchronized('pgadmin', 'tests-', external=True)
    def _reset_pg(self, conn_pieces):
        (user, password, database, host) = get_db_connection_info(conn_pieces)
        os.environ['PGPASSWORD'] = password
        os.environ['PGUSER'] = user
        # note(boris-42): We must create and drop database, we can't
        # drop database which we have connected to, so for such
        # operations there is a special database template1.
        sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
                  " '%(sql)s' -d template1")

        sql = ("drop database if exists %s;") % database
        droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
        self.execute_cmd(droptable)

        sql = ("create database %s;") % database
        createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
        self.execute_cmd(createtable)

        os.unsetenv('PGPASSWORD')
        os.unsetenv('PGUSER')

    def _reset_databases(self):
        for key, engine in self.engines.items():
            conn_string = self.test_databases[key]
            conn_pieces = urlparse.urlparse(conn_string)
            engine.dispose()
            if conn_string.startswith('sqlite'):
                # We can just delete the SQLite database, which is
                # the easiest and cleanest solution
                db_path = conn_pieces.path.strip('/')
                if os.path.exists(db_path):
                    os.unlink(db_path)
                # No need to recreate the SQLite DB. SQLite will
                # create it for us if it's not there...
            elif conn_string.startswith('mysql'):
                # We can execute the MySQL client to destroy and re-create
                # the MySQL database, which is easier and less error-prone
                # than using SQLAlchemy to do this via MetaData...trust me.
                (user, password, database, host) = \
                    get_db_connection_info(conn_pieces)
                sql = ("drop database if exists %(database)s; "
                       "create database %(database)s;") % \
                    {'database': database}
                cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s "
                       "-e \"%(sql)s\"") % {'user': user, 'password': password,
                                            'host': host, 'sql': sql}
                self.execute_cmd(cmd)
            elif conn_string.startswith('postgresql'):
                self._reset_pg(conn_pieces)


class WalkVersionsMixin(object):
    def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
        # Determine latest version script from the repo, then
        # upgrade from 1 through to the latest, with no data
        # in the databases. This just checks that the schema itself
        # upgrades successfully.

        # Place the database under version control
        self.migration_api.version_control(engine, self.REPOSITORY,
                                           self.INIT_VERSION)
        self.assertEqual(self.INIT_VERSION,
                         self.migration_api.db_version(engine,
                                                       self.REPOSITORY))

        LOG.debug('latest version is %s' % self.REPOSITORY.latest)
        versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)

        for version in versions:
            # upgrade -> downgrade -> upgrade
            self._migrate_up(engine, version, with_data=True)
            if snake_walk:
                downgraded = self._migrate_down(
                    engine, version - 1, with_data=True)
                if downgraded:
                    self._migrate_up(engine, version)

        if downgrade:
            # Now walk it back down to 0 from the latest, testing
            # the downgrade paths.
            for version in reversed(versions):
                # downgrade -> upgrade -> downgrade
                downgraded = self._migrate_down(engine, version - 1)

                if snake_walk and downgraded:
                    self._migrate_up(engine, version)
                    self._migrate_down(engine, version - 1)

    def _migrate_down(self, engine, version, with_data=False):
        try:
            self.migration_api.downgrade(engine, self.REPOSITORY, version)
        except NotImplementedError:
            # NOTE(sirp): some migrations, namely release-level
            # migrations, don't support a downgrade.
            return False

        self.assertEqual(
            version, self.migration_api.db_version(engine, self.REPOSITORY))

        # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
        # version). So if we have any downgrade checks, they need to be run for
        # the previous (higher numbered) migration.
        if with_data:
            post_downgrade = getattr(
                self, "_post_downgrade_%03d" % (version + 1), None)
            if post_downgrade:
                post_downgrade(engine)

        return True

    def _migrate_up(self, engine, version, with_data=False):
        """migrate up to a new version of the db.

        We allow for data insertion and post checks at every
        migration version with special _pre_upgrade_### and
        _check_### functions in the main test.
        """
        # NOTE(sdague): try block is here because it's impossible to debug
        # where a failed data migration happens otherwise
        try:
            if with_data:
                data = None
                pre_upgrade = getattr(
                    self, "_pre_upgrade_%03d" % version, None)
                if pre_upgrade:
                    data = pre_upgrade(engine)

            self.migration_api.upgrade(engine, self.REPOSITORY, version)
            self.assertEqual(version,
                             self.migration_api.db_version(engine,
                                                           self.REPOSITORY))
            if with_data:
                check = getattr(self, "_check_%03d" % version, None)
                if check:
                    check(engine, data)
        except Exception:
            LOG.error("Failed to migrate to version %s on engine %s" %
                      (version, engine))
            raise


class TestWalkVersions(test_utils.BaseTestCase, WalkVersionsMixin):
    def setUp(self):
        super(TestWalkVersions, self).setUp()
        self.migration_api = mock.MagicMock()
        self.engine = mock.MagicMock()
        self.REPOSITORY = mock.MagicMock()
        self.INIT_VERSION = 4

    def test_migrate_up(self):
        self.migration_api.db_version.return_value = 141

        self._migrate_up(self.engine, 141)

        self.migration_api.upgrade.assert_called_with(
            self.engine, self.REPOSITORY, 141)
        self.migration_api.db_version.assert_called_with(
            self.engine, self.REPOSITORY)

    def test_migrate_up_with_data(self):
        test_value = {"a": 1, "b": 2}
        self.migration_api.db_version.return_value = 141
        self._pre_upgrade_141 = mock.MagicMock()
        self._pre_upgrade_141.return_value = test_value
        self._check_141 = mock.MagicMock()

        self._migrate_up(self.engine, 141, True)

        self._pre_upgrade_141.assert_called_with(self.engine)
        self._check_141.assert_called_with(self.engine, test_value)

    def test_migrate_down(self):
        self.migration_api.db_version.return_value = 42

        self.assertTrue(self._migrate_down(self.engine, 42))
        self.migration_api.db_version.assert_called_with(
            self.engine, self.REPOSITORY)

    def test_migrate_down_not_implemented(self):
        self.migration_api.downgrade.side_effect = NotImplementedError
        self.assertFalse(self._migrate_down(self.engine, 42))

    def test_migrate_down_with_data(self):
        self._post_downgrade_043 = mock.MagicMock()
        self.migration_api.db_version.return_value = 42

        self._migrate_down(self.engine, 42, True)

        self._post_downgrade_043.assert_called_with(self.engine)

    @mock.patch.object(WalkVersionsMixin, '_migrate_up')
    @mock.patch.object(WalkVersionsMixin, '_migrate_down')
    def test_walk_versions_all_default(self, _migrate_up, _migrate_down):
        self.REPOSITORY.latest = 20
        self.migration_api.db_version.return_value = self.INIT_VERSION

        self._walk_versions()

        self.migration_api.version_control.assert_called_with(
            None, self.REPOSITORY, self.INIT_VERSION)
        self.migration_api.db_version.assert_called_with(
            None, self.REPOSITORY)

        versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
        upgraded = [mock.call(None, v, with_data=True) for v in versions]
        self.assertEquals(self._migrate_up.call_args_list, upgraded)

        downgraded = [mock.call(None, v - 1) for v in reversed(versions)]
        self.assertEquals(self._migrate_down.call_args_list, downgraded)

    @mock.patch.object(WalkVersionsMixin, '_migrate_up')
    @mock.patch.object(WalkVersionsMixin, '_migrate_down')
    def test_walk_versions_all_true(self, _migrate_up, _migrate_down):
        self.REPOSITORY.latest = 20
        self.migration_api.db_version.return_value = self.INIT_VERSION

        self._walk_versions(self.engine, snake_walk=True, downgrade=True)

        versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
        upgraded = []
        for v in versions:
            upgraded.append(mock.call(self.engine, v, with_data=True))
            upgraded.append(mock.call(self.engine, v))
        upgraded.extend(
            [mock.call(self.engine, v) for v in reversed(versions)]
        )
        self.assertEquals(upgraded, self._migrate_up.call_args_list)

        downgraded_1 = [
            mock.call(self.engine, v - 1, with_data=True) for v in versions
        ]
        downgraded_2 = []
        for v in reversed(versions):
            downgraded_2.append(mock.call(self.engine, v - 1))
            downgraded_2.append(mock.call(self.engine, v - 1))
        downgraded = downgraded_1 + downgraded_2
        self.assertEquals(self._migrate_down.call_args_list, downgraded)

    @mock.patch.object(WalkVersionsMixin, '_migrate_up')
    @mock.patch.object(WalkVersionsMixin, '_migrate_down')
    def test_walk_versions_true_false(self, _migrate_up, _migrate_down):
        self.REPOSITORY.latest = 20
        self.migration_api.db_version.return_value = self.INIT_VERSION

        self._walk_versions(self.engine, snake_walk=True, downgrade=False)

        versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)

        upgraded = []
        for v in versions:
            upgraded.append(mock.call(self.engine, v, with_data=True))
            upgraded.append(mock.call(self.engine, v))
        self.assertEquals(upgraded, self._migrate_up.call_args_list)

        downgraded = [
            mock.call(self.engine, v - 1, with_data=True) for v in versions
        ]
        self.assertEquals(self._migrate_down.call_args_list, downgraded)

    @mock.patch.object(WalkVersionsMixin, '_migrate_up')
    @mock.patch.object(WalkVersionsMixin, '_migrate_down')
    def test_walk_versions_all_false(self, _migrate_up, _migrate_down):
        self.REPOSITORY.latest = 20
        self.migration_api.db_version.return_value = self.INIT_VERSION

        self._walk_versions(self.engine, snake_walk=False, downgrade=False)

        versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)

        upgraded = [
            mock.call(self.engine, v, with_data=True) for v in versions
        ]
        self.assertEquals(upgraded, self._migrate_up.call_args_list)
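The mixin and base class above are deliberately generic: a concrete test supplies REPOSITORY, INIT_VERSION and migration_api, then drives _walk_versions() and adds optional _pre_upgrade_###/_check_###/_post_downgrade_### hooks. A hedged sketch of what an ironic-specific subclass could look like; the ironic.db.sqlalchemy.migrate_repo package, the ironic.db.sqlalchemy.migration module, and the 'nodes' table check are illustrative assumptions rather than part of this change:

import os

from migrate.versioning import repository
import sqlalchemy

from ironic.db.sqlalchemy import migrate_repo   # assumed repo package
from ironic.db.sqlalchemy import migration      # assumed upgrade/downgrade API
from ironic.tests.db.sqlalchemy import test_migrations


class TestMigrations(test_migrations.BaseMigrationTestCase,
                     test_migrations.WalkVersionsMixin):
    def setUp(self):
        super(TestMigrations, self).setUp()
        self.REPOSITORY = repository.Repository(
            os.path.abspath(os.path.dirname(migrate_repo.__file__)))
        self.INIT_VERSION = 0
        self.migration_api = migration

    def test_walk_versions(self):
        # Walk every configured engine (sqlite by default, see the .conf file)
        # up to the latest revision and back down again.
        for engine in self.engines.values():
            self._walk_versions(engine, snake_walk=False, downgrade=True)

    def _check_001(self, engine, data):
        # Optional per-version hook: e.g. assert that the first migration
        # created a 'nodes' table (table and column names purely illustrative).
        nodes = sqlalchemy.Table('nodes', sqlalchemy.MetaData(),
                                 autoload=True, autoload_with=engine)
        self.assertIn('uuid', [c.name for c in nodes.columns])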
ironic/tests/utils.py (new file, +80 lines)
@@ -0,0 +1,80 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Common utilities used in testing"""

import os
import tempfile

import fixtures
from oslo.config import cfg
import testtools

from ironic.openstack.common.fixture import moxstubout


class BaseTestCase(testtools.TestCase):

    def setUp(self, conf=cfg.CONF):
        super(BaseTestCase, self).setUp()
        moxfixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = moxfixture.mox
        self.stubs = moxfixture.stubs
        self.conf = conf
        self.addCleanup(self.conf.reset)
        self.useFixture(fixtures.FakeLogger('openstack.common'))
        self.useFixture(fixtures.Timeout(30, True))
        self.config(fatal_exception_format_errors=True)
        self.useFixture(fixtures.NestedTempfile())
        self.tempdirs = []

    def tearDown(self):
        super(BaseTestCase, self).tearDown()
        self.conf.reset()
        self.stubs.UnsetAll()
        self.stubs.SmartUnsetAll()

    def create_tempfiles(self, files, ext='.conf'):
        tempfiles = []
        for (basename, contents) in files:
            if not os.path.isabs(basename):
                (fd, path) = tempfile.mkstemp(prefix=basename, suffix=ext)
            else:
                path = basename + ext
                fd = os.open(path, os.O_CREAT | os.O_WRONLY)
            tempfiles.append(path)
            try:
                os.write(fd, contents)
            finally:
                os.close(fd)
        return tempfiles

    def config(self, **kw):
        """Override some configuration values.

        The keyword arguments are the names of configuration options to
        override and their values.

        If a group argument is supplied, the overrides are applied to
        the specified configuration option group.

        All overrides are automatically cleared at the end of the current
        test by the tearDown() method.
        """
        group = kw.pop('group', None)
        for k, v in kw.iteritems():
            self.conf.set_override(k, v, group)
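BaseTestCase wires the MoxStubout fixture, oslo.config overrides that reset themselves, a 30-second timeout, and temp-file handling into every test. A short sketch of the config() and create_tempfiles() helpers defined above; the option registration is there only to keep the sketch self-contained (in the real tree fatal_exception_format_errors is registered by ironic itself):

from oslo.config import cfg

from ironic.tests import utils as test_utils

# Registered here only so the sketch runs standalone; BaseTestCase.setUp()
# overrides this option, so it must exist before the test starts.
cfg.CONF.register_opt(cfg.BoolOpt('fatal_exception_format_errors',
                                  default=False))


class TestHelpers(test_utils.BaseTestCase):
    def test_config_and_tempfiles(self):
        # config() forwards to CONF.set_override(); the override is cleared
        # again by the conf.reset cleanup registered in setUp().
        self.config(fatal_exception_format_errors=False)
        self.assertFalse(self.conf.fatal_exception_format_errors)

        # create_tempfiles() writes (basename, contents) pairs to disk and
        # returns the generated paths, defaulting to a '.conf' suffix.
        paths = self.create_tempfiles([('sample', '[DEFAULT]\nfoo=bar\n')])
        self.assertTrue(paths[0].endswith('.conf'))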
openstack-common.conf
@@ -7,6 +7,7 @@ module=db.sqlalchemy
 module=eventlet_backdoor
 module=excutils
 module=fileutils
+module=fixture
 module=flakes
 module=gettextutils
 module=importutils
test-requirements.txt
@@ -6,6 +6,7 @@ hacking>=0.5.6,<0.7
 coverage>=3.6
 discover
 fixtures>=0.3.12
 mock>=0.8.0
+mox>=0.5.3
 Babel>=0.9.6
 MySQL-python