Make test_databases an instance variable

Eventually we'll need to do this to allow for testr testing, but in
doing so we also exposed that the sqlite downgrade of source_volid
didn't work when data was in the table (sqlite just happily ignored
the drop, then failed when trying to add the column a second time
during the snake walk).

This fix also adds the correct sqlite downgrade path for migration
005, which actually drops source_volid while ensuring we don't lose
any other data in the process.

Change-Id: I082b53e108c4d564e33ef79979ea8c1642afdbcd
Sean Dague 2013-02-09 10:10:03 -05:00
parent 835fb61442
commit 3edcec0f48
2 changed files with 131 additions and 13 deletions


@@ -0,0 +1,124 @@
BEGIN TRANSACTION;
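-- SQLite has no ALTER TABLE ... DROP COLUMN, so dropping source_volid
-- means rebuilding the table: copy every row into a temp table, recreate
-- volumes without the column, then copy the rows back.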
CREATE TEMPORARY TABLE volumes_backup (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id VARCHAR(36) NOT NULL,
ec2_id VARCHAR(255),
user_id VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
size INTEGER,
availability_zone VARCHAR(255),
instance_uuid VARCHAR(36),
mountpoint VARCHAR(255),
attach_time VARCHAR(255),
status VARCHAR(255),
attach_status VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
provider_location VARCHAR(256),
provider_auth VARCHAR(256),
snapshot_id VARCHAR(36),
volume_type_id VARCHAR(36),
source_volid VARCHAR(36),
PRIMARY KEY (id),
CHECK (deleted IN (0, 1))
);
INSERT INTO volumes_backup
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
ec2_id,
user_id,
project_id,
host,
size,
availability_zone,
instance_uuid,
mountpoint,
attach_time,
status,
attach_status,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
provider_location,
provider_auth,
snapshot_id,
volume_type_id,
source_volid
FROM volumes;
DROP TABLE volumes;
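-- Recreate volumes without the source_volid column.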
CREATE TABLE volumes (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN,
id VARCHAR(36) NOT NULL,
ec2_id VARCHAR(255),
user_id VARCHAR(255),
project_id VARCHAR(255),
host VARCHAR(255),
size INTEGER,
availability_zone VARCHAR(255),
instance_uuid VARCHAR(36),
mountpoint VARCHAR(255),
attach_time VARCHAR(255),
status VARCHAR(255),
attach_status VARCHAR(255),
scheduled_at DATETIME,
launched_at DATETIME,
terminated_at DATETIME,
display_name VARCHAR(255),
display_description VARCHAR(255),
provider_location VARCHAR(256),
provider_auth VARCHAR(256),
snapshot_id VARCHAR(36),
volume_type_id VARCHAR(36),
PRIMARY KEY (id),
CHECK (deleted IN (0, 1))
);
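-- Copy the preserved rows back, minus source_volid.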
INSERT INTO volumes
SELECT created_at,
updated_at,
deleted_at,
deleted,
id,
ec2_id,
user_id,
project_id,
host,
size,
availability_zone,
instance_uuid,
mountpoint,
attach_time,
status,
attach_status,
scheduled_at,
launched_at,
terminated_at,
display_name,
display_description,
provider_location,
provider_auth,
snapshot_id,
volume_type_id
FROM volumes_backup;
DROP TABLE volumes_backup;
COMMIT;
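A minimal sketch of the same rebuild pattern, using a cut-down two-column
stand-in for the volumes schema rather than the full table above (the toy
names and data are assumptions for illustration only):

import sqlite3

conn = sqlite3.connect(":memory:")

# Toy stand-in for the volumes table, with a row already in it.
conn.executescript("""
CREATE TABLE volumes (
    id VARCHAR(36) NOT NULL PRIMARY KEY,
    display_name VARCHAR(255),
    source_volid VARCHAR(36)
);
INSERT INTO volumes VALUES ('vol-1', 'my volume', 'vol-0');
""")

# The same copy-out/recreate/copy-back dance as the downgrade script.
conn.executescript("""
CREATE TEMPORARY TABLE volumes_backup (
    id VARCHAR(36) NOT NULL PRIMARY KEY,
    display_name VARCHAR(255),
    source_volid VARCHAR(36)
);
INSERT INTO volumes_backup SELECT id, display_name, source_volid FROM volumes;
DROP TABLE volumes;
CREATE TABLE volumes (
    id VARCHAR(36) NOT NULL PRIMARY KEY,
    display_name VARCHAR(255)
);
INSERT INTO volumes SELECT id, display_name FROM volumes_backup;
DROP TABLE volumes_backup;
""")

# source_volid is gone; the data is not.
columns = [row[1] for row in conn.execute("PRAGMA table_info(volumes)")]
assert "source_volid" not in columns
assert conn.execute("SELECT display_name FROM volumes").fetchone() == ("my volume",)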


@@ -104,7 +104,6 @@ def get_table(engine, name):
 class TestMigrations(test.TestCase):
     """Test sqlalchemy-migrate migrations."""
 
-    TEST_DATABASES = {}
     DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                        'test_migrations.conf')
     # Test machines can set the CINDER_TEST_MIGRATIONS_CONF variable
@@ -119,18 +118,19 @@ class TestMigrations(test.TestCase):
         super(TestMigrations, self).setUp()
         self.snake_walk = False
+        self.test_databases = {}
+
         # Load test databases from the config file. Only do this
         # once. No need to re-run this on each test...
         LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH)
-        if not TestMigrations.TEST_DATABASES:
+        if not self.test_databases:
             if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
                 cp = ConfigParser.RawConfigParser()
                 try:
                     cp.read(TestMigrations.CONFIG_FILE_PATH)
                     defaults = cp.defaults()
                     for key, value in defaults.items():
-                        TestMigrations.TEST_DATABASES[key] = value
+                        self.test_databases[key] = value
                     self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
                 except ConfigParser.ParsingError, e:
                     self.fail("Failed to read test_migrations.conf config "
@@ -140,7 +140,7 @@ class TestMigrations(test.TestCase):
                               "file.")
 
         self.engines = {}
-        for key, value in TestMigrations.TEST_DATABASES.items():
+        for key, value in self.test_databases.items():
             self.engines[key] = sqlalchemy.create_engine(value)
 
         # We start each test case with a completely blank slate.
@@ -151,13 +151,7 @@ class TestMigrations(test.TestCase):
         # We destroy the test data store between each test case,
         # and recreate it, which ensures that we have no side-effects
         # from the tests
-        # self._reset_databases()
-
-        # remove these from the list so they aren't used in the migration tests
-        if "mysqlcitest" in self.engines:
-            del self.engines["mysqlcitest"]
-        if "mysqlcitest" in TestMigrations.TEST_DATABASES:
-            del TestMigrations.TEST_DATABASES["mysqlcitest"]
+        self._reset_databases()
         super(TestMigrations, self).tearDown()
 
     def _reset_databases(self):
@@ -166,7 +160,7 @@ class TestMigrations(test.TestCase):
             LOG.debug(output)
             self.assertEqual(0, status)
         for key, engine in self.engines.items():
-            conn_string = TestMigrations.TEST_DATABASES[key]
+            conn_string = self.test_databases[key]
             conn_pieces = urlparse.urlparse(conn_string)
             if conn_string.startswith('sqlite'):
                 # We can just delete the SQLite database, which is
@@ -252,7 +246,7 @@ class TestMigrations(test.TestCase):
         connect_string = _get_connect_string('mysql')
         engine = sqlalchemy.create_engine(connect_string)
         self.engines["mysqlcitest"] = engine
-        TestMigrations.TEST_DATABASES["mysqlcitest"] = connect_string
+        self.test_databases["mysqlcitest"] = connect_string
 
         # build a fully populated mysql database with all the tables
         self._reset_databases()
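The reason the class-level TEST_DATABASES dict had to become
self.test_databases: a mutable class attribute is shared by every instance
of the test class, so entries like the "mysqlcitest" connection string
added above leak from one test into the next, which is exactly the kind of
hidden shared state that breaks once testr runs tests in separate,
reordered, or parallel chunks. A minimal stand-alone illustration of the
difference (hypothetical classes, not the Cinder code):

class SharedState(object):
    databases = {}  # one dict shared across all instances

class PerInstanceState(object):
    def __init__(self):
        self.databases = {}  # a fresh dict for every instance

a, b = SharedState(), SharedState()
a.databases["mysqlcitest"] = "mysql://..."
assert "mysqlcitest" in b.databases  # leaked: b sees a's write

c, d = PerInstanceState(), PerInstanceState()
c.databases["mysqlcitest"] = "mysql://..."
assert "mysqlcitest" not in d.databases  # isolated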