Merge "Remove Ocata's data migrations"

This commit is contained in:
Jenkins 2017-02-15 14:53:56 +00:00 committed by Gerrit Code Review
commit 00153284b2
6 changed files with 1 addition and 504 deletions

View File

@@ -207,8 +207,7 @@ class HostCommands(object):
# NOTE(review): diff fragment — both the pre-change and post-change versions
# of ``online_migrations`` are shown below. The tuple listing Ocata's two
# online data migrations is the REMOVED line; the empty tuple is the ADDED
# replacement, leaving no online data migrations registered.
class DbCommands(object):
"""Class for managing the database."""
# Removed by this commit: Ocata's online data migrations.
online_migrations = (db.migrate_consistencygroups_to_groups,
db.migrate_add_message_prefix)
# Added by this commit: no online data migrations remain.
online_migrations = ()
def __init__(self):
pass

View File

@@ -1348,19 +1348,6 @@ def consistencygroup_include_in_cluster(context, cluster, partial_rename=True,
**filters)
# Thin dispatch wrapper: delegates to the active database backend (IMPL).
def migrate_add_message_prefix(context, max_count, force=False):
"""Change Message event ids to start with the VOLUME_ prefix.
:param context: request context, passed through to the backend.
:param max_count: The maximum number of messages to consider in
this run.
:param force: Ignored in this migration
:returns: number of messages needing migration, number of
messages migrated (both will always be less than or
equal to max_count).
"""
return IMPL.migrate_add_message_prefix(context, max_count, force)
###################
@@ -1444,11 +1431,6 @@ def group_volume_type_mapping_create(context, group_id, volume_type_id):
volume_type_id)
# Thin dispatch wrapper: delegates to the active database backend (IMPL).
def migrate_consistencygroups_to_groups(context, max_count, force=False):
"""Migrate CGs to generic volume groups.
:param context: request context, passed through to the backend.
:param max_count: maximum number of consistency groups to migrate
in this run.
:param force: also migrate CGs that are in the middle of an
operation (the backend otherwise skips CGs whose status is
not available/error/deleting).
:returns: (number of CGs examined, number of CGs migrated), both
no greater than max_count.
"""
return IMPL.migrate_consistencygroups_to_groups(context, max_count, force)
###################

View File

@@ -60,7 +60,6 @@ from cinder import exception
from cinder.i18n import _, _LW, _LE, _LI
from cinder.objects import fields
from cinder import utils
from cinder.volume import group_types
CONF = cfg.CONF
@@ -5463,26 +5462,6 @@ def consistencygroup_include_in_cluster(context, cluster,
partial_rename, filters)
@require_admin_context
def migrate_add_message_prefix(context, max_count, force=False):
# Prepend "VOLUME_" to Message.event_id values that do not already have
# it. Returns (count_all, count_hit): rows still needing migration
# (capped at max_count) and rows actually updated in this run. ``force``
# is accepted for interface parity with other online migrations but is
# ignored here.
prefix = "VOLUME_"
session = get_session()
with session.begin():
# Select up to max_count message ids whose event_id lacks the prefix.
messages = (model_query(context, models.Message.id, session=session).
filter(~models.Message.event_id.like(prefix + '%')).
limit(max_count))
ids = [msg[0] for msg in messages.all()]
count_all = messages.count()
count_hit = 0
if ids:
# Bulk UPDATE of the selected rows; synchronize_session=False
# skips updating in-session objects (nothing else in this
# session reads these rows afterwards).
count_hit = (model_query(context, models.Message, session=session).
filter(models.Message.id.in_(ids)).
update({'event_id': prefix + models.Message.event_id},
synchronize_session=False))
return count_all, count_hit
###############################
@@ -5770,151 +5749,6 @@ def group_creating_from_src(group_id=None, group_snapshot_id=None):
return sql.exists([subq]).where(match_id)
@require_admin_context
def migrate_consistencygroups_to_groups(context, max_count, force=False):
# Copy each ConsistencyGroup row (plus its volumes, cgsnapshots and
# snapshots) into the generic Group tables, then delete the CG-side rows.
# Returns (count_all, count_hit): CGs examined vs. CGs actually migrated
# in this run, each capped by max_count. With force=False, CGs whose
# status is not AVAILABLE/ERROR/DELETING are counted but skipped.
now = timeutils.utcnow()
grps = model_query(context, models.Group)
ids = [grp.id for grp in grps] if grps else []
# NOTE(xyang): We are using the same IDs in the CG and Group tables.
# This is because we are deleting the entry from the CG table after
# migrating it to the Group table. Also when the user queries a CG id,
# we will display it whether it is in the CG table or the Group table.
# Without using the same IDs, we'll have to add a consistencygroup_id
# column in the Group group to correlate it with the CG entry so we
# know whether it has been migrated or not. It makes things more
# complicated especially because the CG entry will be removed after
# migration.
query = (model_query(context, models.ConsistencyGroup).
filter(models.ConsistencyGroup.id.notin_(ids)))
cgs = query.limit(max_count)
# Check if default group_type for migrating cgsnapshots exists
result = (model_query(context, models.GroupTypes,
project_only=True).
filter_by(name=group_types.DEFAULT_CGSNAPSHOT_TYPE).
first())
if not result:
msg = (_('Group type %s not found. Rerun migration script to create '
'the default cgsnapshot type.') %
group_types.DEFAULT_CGSNAPSHOT_TYPE)
raise exception.NotFound(msg)
grp_type_id = result['id']
count_all = 0
count_hit = 0
for cg in cgs.all():
cg_ids = []
cgsnapshot_ids = []
volume_ids = []
snapshot_ids = []
# Each CG is migrated in its own transaction.
session = get_session()
with session.begin():
count_all += 1
cgsnapshot_list = []
vol_list = []
# NOTE(dulek): We should avoid modifying consistency groups that
# are in the middle of some operation.
if not force:
if cg.status not in (fields.ConsistencyGroupStatus.AVAILABLE,
fields.ConsistencyGroupStatus.ERROR,
fields.ConsistencyGroupStatus.DELETING):
continue
# Migrate CG to group
grp = model_query(context, models.Group,
session=session).filter_by(id=cg.id).first()
if grp:
# NOTE(xyang): This CG is already migrated to group.
continue
values = {'id': cg.id,
'created_at': now,
'updated_at': now,
'deleted': False,
'user_id': cg.user_id,
'project_id': cg.project_id,
'host': cg.host,
'cluster_name': cg.cluster_name,
'availability_zone': cg.availability_zone,
'name': cg.name,
'description': cg.description,
'group_type_id': grp_type_id,
'status': cg.status,
'group_snapshot_id': cg.cgsnapshot_id,
'source_group_id': cg.source_cgid,
}
# CG volume_type_id is a comma-separated (trailing-comma) list;
# expand it into one GroupVolumeTypeMapping row per type.
mappings = []
for item in cg.volume_type_id.rstrip(',').split(','):
mapping = models.GroupVolumeTypeMapping()
mapping['volume_type_id'] = item
mapping['group_id'] = cg.id
mappings.append(mapping)
values['volume_types'] = mappings
grp = models.Group()
grp.update(values)
session.add(grp)
cg_ids.append(cg.id)
# Update group_id in volumes
vol_list = (model_query(context, models.Volume,
session=session).
filter_by(consistencygroup_id=cg.id).all())
for vol in vol_list:
vol.group_id = cg.id
volume_ids.append(vol.id)
# Migrate data from cgsnapshots to group_snapshots
cgsnapshot_list = (model_query(context, models.Cgsnapshot,
session=session).
filter_by(consistencygroup_id=cg.id).all())
for cgsnap in cgsnapshot_list:
grp_snap = (model_query(context, models.GroupSnapshot,
session=session).
filter_by(id=cgsnap.id).first())
if grp_snap:
# NOTE(xyang): This CGSnapshot is already migrated to
# group snapshot.
continue
grp_snap = models.GroupSnapshot()
values = {'id': cgsnap.id,
'created_at': now,
'updated_at': now,
'deleted': False,
'user_id': cgsnap.user_id,
'project_id': cgsnap.project_id,
'group_id': cg.id,
'name': cgsnap.name,
'description': cgsnap.description,
'group_type_id': grp_type_id,
'status': cgsnap.status, }
grp_snap.update(values)
session.add(grp_snap)
cgsnapshot_ids.append(cgsnap.id)
# Update group_snapshot_id in snapshots
snap_list = (model_query(context, models.Snapshot,
session=session).
filter_by(cgsnapshot_id=cgsnap.id).all())
for snap in snap_list:
snap.group_snapshot_id = cgsnap.id
snapshot_ids.append(snap.id)
# Delete entries in CG and CGSnapshot tables
cg_cgsnapshot_destroy_all_by_ids(context, cg_ids, cgsnapshot_ids,
volume_ids, snapshot_ids,
session=session)
count_hit += 1
return count_all, count_hit
###############################

View File

@@ -44,9 +44,4 @@ event_id_message_map = {
def get_message_text(event_id):
"""Return the user-facing message text for *event_id*.
Legacy event ids stored without the "VOLUME_" prefix are normalized
before the lookup; raises KeyError if the (prefixed) id is unknown.
"""
# FIXME(ameade): In the Ocata release, this check can be removed as
# there should no longer be any event ids that do not start with the prefix
if not event_id.startswith("VOLUME_"):
event_id = "VOLUME_" + event_id
return event_id_message_map[event_id]

View File

@@ -41,6 +41,3 @@ class DefinedMessagesTest(test.TestCase):
value = getattr(defined_messages.EventIds, attr_name)
self.assertTrue(defined_messages.event_id_message_map.get(
value))
# Legacy event ids without the "VOLUME_" prefix must still resolve to
# non-empty message text (get_message_text prepends the prefix itself).
def test_event_id_missing_prefix(self):
self.assertTrue(defined_messages.get_message_text('000001'))

View File

@@ -34,7 +34,6 @@ from cinder import quota
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils
from cinder.volume import group_types
CONF = cfg.CONF
THREE = 3
@@ -1762,315 +1761,6 @@ class DBAPIConsistencygroupTestCase(BaseTest):
db_cgs[i].cluster_name)
class DBAPIMigrateCGstoGroupsTestCase(BaseTest):
"""Tests for cinder.db.api.migrate_consistencygroups_to_groups."""
# Fixture layout built in setUp:
#   CG '1' (available, types a,b) with vols 1-2 and cgsnap1 (snaps 1-2)
#   CG '2' (error, type a), no volumes
#   CG '3' (available, type b) with vol 3 and cgsnap3 (snap 3)
#   CG '4' (updating, type a) -- only migrated when force=True
#   CG '5' (available, created from cgsnap3) with vol 5
#   CG '6' (available, created from source CG '5') with vol 6
def setUp(self):
super(DBAPIMigrateCGstoGroupsTestCase, self).setUp()
db.volume_type_create(self.ctxt, {'id': 'a', 'name': 'a'})
db.volume_type_create(self.ctxt, {'id': 'b', 'name': 'b'})
cg_dicts = [
{'id': '1', 'status': fields.ConsistencyGroupStatus.AVAILABLE,
'volume_type_id': 'a,b,'},
{'id': '2', 'status': fields.ConsistencyGroupStatus.ERROR,
'volume_type_id': 'a,'},
{'id': '3',
'status': fields.ConsistencyGroupStatus.AVAILABLE,
'volume_type_id': 'b,'},
{'id': '4',
'status': fields.ConsistencyGroupStatus.UPDATING,
'volume_type_id': 'a,'},
]
for cg_dict in cg_dicts:
db.consistencygroup_create(self.ctxt, cg_dict)
# Create volumes in CGs
self.vol1 = db.volume_create(self.ctxt, {'volume_type_id': 'a',
'consistencygroup_id': '1',
'status': 'available',
'size': 1})
self.vol2 = db.volume_create(self.ctxt, {'volume_type_id': 'b',
'consistencygroup_id': '1',
'status': 'available',
'size': 1})
self.vol3 = db.volume_create(self.ctxt, {'volume_type_id': 'b',
'consistencygroup_id': '3',
'status': 'available',
'size': 1})
# Create cgsnapshots
cgsnap1 = db.cgsnapshot_create(
self.ctxt,
{'id': 'cgsnap1',
'consistencygroup_id': '1',
'status': fields.ConsistencyGroupStatus.AVAILABLE}
)
cgsnap3 = db.cgsnapshot_create(
self.ctxt,
{'id': 'cgsnap3',
'consistencygroup_id': '3',
'status': fields.ConsistencyGroupStatus.AVAILABLE}
)
# Create snapshots
self.snap1 = db.snapshot_create(
self.ctxt,
{'volume_id': self.vol1['id'],
'cgsnapshot_id': cgsnap1['id'],
'status': fields.SnapshotStatus.AVAILABLE})
self.snap2 = db.snapshot_create(
self.ctxt,
{'volume_id': self.vol2['id'],
'cgsnapshot_id': cgsnap1['id'],
'status': fields.SnapshotStatus.AVAILABLE})
self.snap3 = db.snapshot_create(
self.ctxt,
{'volume_id': self.vol3['id'],
'cgsnapshot_id': cgsnap3['id'],
'status': fields.SnapshotStatus.AVAILABLE})
# Create CG from CG snapshot
cg5_dict = {
'id': '5',
'cgsnapshot_id': cgsnap3['id'],
'status': fields.ConsistencyGroupStatus.AVAILABLE,
'volume_type_id': 'b,'
}
db.consistencygroup_create(self.ctxt, cg5_dict)
cg_dicts.append(cg5_dict)
self.vol5 = db.volume_create(self.ctxt, {'volume_type_id': 'b',
'consistencygroup_id': '5',
'status': 'available',
'size': 1})
# Create CG from source CG
cg6_dict = {
'id': '6',
'source_cgid': '5',
'status': fields.ConsistencyGroupStatus.AVAILABLE,
'volume_type_id': 'b,'
}
db.consistencygroup_create(self.ctxt, cg6_dict)
cg_dicts.append(cg6_dict)
self.vol6 = db.volume_create(self.ctxt, {'volume_type_id': 'b',
'consistencygroup_id': '6',
'status': 'available',
'size': 1})
self.addCleanup(self._cleanup)
# Tear down every fixture row created in setUp, plus any Group /
# GroupSnapshot rows created by the migration under test.
def _cleanup(self):
db.snapshot_destroy(self.ctxt, self.snap1.id)
db.snapshot_destroy(self.ctxt, self.snap2.id)
db.snapshot_destroy(self.ctxt, self.snap3.id)
db.volume_destroy(self.ctxt, self.vol1.id)
db.volume_destroy(self.ctxt, self.vol2.id)
db.volume_destroy(self.ctxt, self.vol3.id)
db.volume_destroy(self.ctxt, self.vol5.id)
db.volume_destroy(self.ctxt, self.vol6.id)
db.cgsnapshot_destroy(self.ctxt, 'cgsnap1')
db.cgsnapshot_destroy(self.ctxt, 'cgsnap3')
db.group_snapshot_destroy(self.ctxt, 'cgsnap1')
db.group_snapshot_destroy(self.ctxt, 'cgsnap3')
db.consistencygroup_destroy(self.ctxt, '1')
db.consistencygroup_destroy(self.ctxt, '2')
db.consistencygroup_destroy(self.ctxt, '3')
db.consistencygroup_destroy(self.ctxt, '4')
db.consistencygroup_destroy(self.ctxt, '5')
db.consistencygroup_destroy(self.ctxt, '6')
db.group_destroy(self.ctxt, '1')
db.group_destroy(self.ctxt, '2')
db.group_destroy(self.ctxt, '3')
db.group_destroy(self.ctxt, '4')
db.group_destroy(self.ctxt, '5')
db.group_destroy(self.ctxt, '6')
db.volume_type_destroy(self.ctxt, 'a')
db.volume_type_destroy(self.ctxt, 'b')
grp_type = group_types.get_default_group_type()
if grp_type:
db.group_type_destroy(self.ctxt, grp_type.id)
# migrated: mapping of cg_id -> cgsnapshot_id (or None when the CG had
# no cgsnapshot); not_migrated: iterable of cg_ids that must NOT exist
# in the Group table.
def _assert_migrated(self, migrated, not_migrated):
for cg_id, cgsnap_id in migrated.items():
grp = db.group_get(self.ctxt, cg_id)
self.assertIsNotNone(grp)
vols_in_cgs = db.volume_get_all_by_group(self.ctxt, cg_id)
vols_in_grps = db.volume_get_all_by_generic_group(self.ctxt, cg_id)
self.assertEqual(0, len(vols_in_cgs))
if cg_id == '1':
self.assertEqual(2, len(vols_in_grps))
elif cg_id == '3':
self.assertEqual(1, len(vols_in_grps))
if cgsnap_id:
grp_snap = db.group_snapshot_get(self.ctxt, cgsnap_id)
self.assertIsNotNone(grp_snap)
snaps_in_cgsnaps = db.snapshot_get_all_for_cgsnapshot(
self.ctxt, cgsnap_id)
snaps_in_grpsnaps = db.snapshot_get_all_for_group_snapshot(
self.ctxt, cgsnap_id)
self.assertEqual(0, len(snaps_in_cgsnaps))
if cg_id == '1':
self.assertEqual(2, len(snaps_in_grpsnaps))
elif cg_id == '3':
self.assertEqual(1, len(snaps_in_grpsnaps))
for cg_id in not_migrated:
self.assertRaises(exception.GroupNotFound,
db.group_get, self.ctxt, cg_id)
def test_migrate(self):
# Run migration
count_all, count_hit = db.migrate_consistencygroups_to_groups(
self.ctxt, 50)
# Check counted entries
self.assertEqual(6, count_all)
self.assertEqual(5, count_hit)
# Check migrated: CG '4' (UPDATING) is skipped without force.
migrated = {'1': 'cgsnap1', '2': None, '3': 'cgsnap3',
'5': None, '6': None}
not_migrated = ('4',)
self._assert_migrated(migrated, not_migrated)
def test_migrate_force(self):
# Run migration
count_all, count_hit = db.migrate_consistencygroups_to_groups(
self.ctxt, 50, True)
# Check counted entries
self.assertEqual(6, count_all)
self.assertEqual(6, count_hit)
# Check migrated
migrated = {'1': 'cgsnap1', '2': None, '3': 'cgsnap3', '4': None,
'5': None, '6': None}
self._assert_migrated(migrated, ())
def test_migrate_limit_force(self):
# Run first migration
count_all, count_hit = db.migrate_consistencygroups_to_groups(
self.ctxt, 2, True)
# Check counted entries
self.assertEqual(2, count_all)
self.assertEqual(2, count_hit)
# Check migrated
# NOTE(review): assumes the limit-2 pass picks CGs '1' and '2' first —
# depends on backend row ordering; confirm against the implementation.
migrated = {'1': 'cgsnap1', '2': None}
not_migrated = ('3', '4', '5', '6',)
self._assert_migrated(migrated, not_migrated)
# Run second migration
count_all, count_hit = db.migrate_consistencygroups_to_groups(
self.ctxt, 4, True)
# Check counted entries
self.assertEqual(4, count_all)
self.assertEqual(4, count_hit)
# Check migrated
migrated = {'1': 'cgsnap1', '2': None, '3': 'cgsnap3', '4': None,
'5': None, '6': None}
self._assert_migrated(migrated, ())
class DBAPIMigrateMessagePrefixTestCase(BaseTest):
"""Tests for cinder.db.api.migrate_add_message_prefix."""
# Fixture: messages '1' and '2' carry the legacy unprefixed event id
# ("test_id"); message '3' is already prefixed ("VOLUME_test_id").
def setUp(self):
super(DBAPIMigrateMessagePrefixTestCase, self).setUp()
message_values = {
"project_id": "fake_project",
"event_id": "test_id",
"message_level": "ERROR",
"id": '1',
}
db.message_create(self.ctxt, message_values)
message_2_values = {
"project_id": "fake_project",
"event_id": "test_id",
"message_level": "ERROR",
"id": '2',
}
db.message_create(self.ctxt, message_2_values)
message_3_values = {
"project_id": "fake_project",
"event_id": "VOLUME_test_id",
"message_level": "ERROR",
"id": '3',
}
db.message_create(self.ctxt, message_3_values)
def tearDown(self):
super(DBAPIMigrateMessagePrefixTestCase, self).tearDown()
db.message_destroy(self.ctxt, {'id': '1'})
db.message_destroy(self.ctxt, {'id': '2'})
db.message_destroy(self.ctxt, {'id': '3'})
# migrated / not_migrated: lists of message ids expected to have the
# prefixed / unprefixed event id respectively.
def _assert_migrated(self, migrated, not_migrated):
for message_id in migrated:
message = db.message_get(self.ctxt, message_id)
self.assertEqual('VOLUME_test_id', message['event_id'])
for message_id in not_migrated:
message = db.message_get(self.ctxt, message_id)
self.assertEqual('test_id', message['event_id'])
def test_migrate(self):
self._assert_migrated(['3'], ['1', '2'])
# Run migration
count_all, count_hit = db.migrate_add_message_prefix(self.ctxt, 50)
# Check counted entries
self.assertEqual(2, count_all)
self.assertEqual(2, count_hit)
self._assert_migrated(['1', '2', '3'], [])
def test_migrate_limit_force(self):
# Run first migration
# NOTE(review): assumes the limit-1 pass picks message '1' first —
# depends on backend row ordering; confirm against the implementation.
count_all, count_hit = db.migrate_add_message_prefix(self.ctxt, 1,
True)
# Check counted entries
self.assertEqual(1, count_all)
self.assertEqual(1, count_hit)
self._assert_migrated(['1', '3'], ['2'])
# Run second migration
count_all, count_hit = db.migrate_add_message_prefix(self.ctxt, 2,
True)
# Check counted entries
self.assertEqual(1, count_all)
self.assertEqual(1, count_hit)
self._assert_migrated(['1', '2', '3'], [])
# Run final migration
count_all, count_hit = db.migrate_add_message_prefix(self.ctxt, 2,
True)
# Check counted entries: nothing left to migrate.
self.assertEqual(0, count_all)
self.assertEqual(0, count_hit)
class DBAPICgsnapshotTestCase(BaseTest):
"""Tests for cinder.db.api.cgsnapshot_*."""