Migrate consistency groups to groups

This patch provides a script to migrate data from consistencygroups to
groups and from cgsnapshots to group_snapshots.

In the migration script, it creates a default_cgsnapshot_type
for migrating data and copies data from consistency groups to
groups and from cgsnapshots to group_snapshots. Migrated consistency
groups and cgsnapshots will be removed from the database.

It depends on the following patch that adds generic code for
online data migrations:
    https://review.openstack.org/#/c/330391/

Run the following command to migrate CGs:
    cinder-manage db online_data_migrations
    --max_count <max>
    --ignore_state
max_count is optional. Default is 50.
ignore_state is optional. Default is False.

UpgradeImpact
Partial-Implements: blueprint generic-volume-group
Related: blueprint online-schema-upgrades
Change-Id: I1cf31e4ba4acffe08e2c09cbfd5b50cf0ea7a6e0
This commit is contained in:
xing-yang 2016-08-02 21:02:34 -04:00
parent f40c86ba51
commit 307da0778f
9 changed files with 550 additions and 11 deletions

View File

@ -207,7 +207,7 @@ class HostCommands(object):
class DbCommands(object):
"""Class for managing the database."""
online_migrations = ()
online_migrations = (db.migrate_consistencygroups_to_groups,)
def __init__(self):
pass

View File

@ -1408,6 +1408,11 @@ def group_volume_type_mapping_create(context, group_id, volume_type_id):
volume_type_id)
def migrate_consistencygroups_to_groups(context, max_count, force=False):
    """Migrate CGs to generic volume groups.

    Thin dispatch wrapper: the actual work is done by the backend
    implementation module (``IMPL``).

    :param context: admin request context
    :param max_count: maximum number of consistency groups to migrate
                      in this invocation
    :param force: when True, migrate CGs regardless of their status
    :returns: tuple of (CG rows examined, CG rows migrated)
    """
    return IMPL.migrate_consistencygroups_to_groups(context, max_count, force)
###################

View File

@ -61,6 +61,7 @@ from cinder import exception
from cinder.i18n import _, _LW, _LE, _LI
from cinder.objects import fields
from cinder import utils
from cinder.volume import group_types
CONF = cfg.CONF
@ -5204,7 +5205,8 @@ def consistencygroup_create(context, values, cg_snap_id=None, cg_id=None):
consistencygroup = cg_model()
consistencygroup.update(values)
session.add(consistencygroup)
return _consistencygroup_get(context, values['id'], session=session)
return _consistencygroup_get(context, values['id'], session=session)
@handle_db_data_error
@ -5223,6 +5225,7 @@ def consistencygroup_update(context, consistencygroup_id, values):
result.update(values)
result.save(session=session)
return result
@ -5241,10 +5244,52 @@ def consistencygroup_destroy(context, consistencygroup_id):
'deleted': True,
'deleted_at': utcnow,
'updated_at': literal_column('updated_at')})
del updated_values['updated_at']
return updated_values
@require_admin_context
def cg_cgsnapshot_destroy_all_by_ids(context, cg_ids, cgsnapshot_ids,
                                     volume_ids, snapshot_ids, session):
    """Delete migrated CG/cgsnapshot rows and detach their children.

    Runs entirely inside the caller-supplied ``session`` so these changes
    commit atomically with the caller's CG-to-group migration.

    Volumes and snapshots are NOT deleted; only their legacy links
    (``consistencygroup_id`` / ``cgsnapshot_id``) are cleared.  CG and
    cgsnapshot rows are removed via the model ``delete()`` (soft delete).
    """
    utcnow = timeutils.utcnow()
    if snapshot_ids:
        # Detach snapshots from their cgsnapshots before those cgsnapshots
        # are deleted below.
        snaps = (model_query(context, models.Snapshot,
                             session=session, read_deleted="no").
                 filter(models.Snapshot.id.in_(snapshot_ids)).
                 all())
        for snap in snaps:
            snap.update({'cgsnapshot_id': None,
                         'updated_at': utcnow})

    if cgsnapshot_ids:
        cg_snaps = (model_query(context, models.Cgsnapshot,
                                session=session, read_deleted="no").
                    filter(models.Cgsnapshot.id.in_(cgsnapshot_ids)).
                    all())

        for cg_snap in cg_snaps:
            cg_snap.delete(session=session)

    if volume_ids:
        # Detach volumes from their consistency groups before the CGs
        # are deleted below.
        vols = (model_query(context, models.Volume,
                            session=session, read_deleted="no").
                filter(models.Volume.id.in_(volume_ids)).
                all())

        for vol in vols:
            vol.update({'consistencygroup_id': None,
                        'updated_at': utcnow})

    if cg_ids:
        cgs = (model_query(context, models.ConsistencyGroup,
                           session=session, read_deleted="no").
               filter(models.ConsistencyGroup.id.in_(cg_ids)).
               all())

        for cg in cgs:
            cg.delete(session=session)
def cg_has_cgsnapshot_filter():
"""Return a filter that checks if a CG has CG Snapshots."""
return sql.exists().where(and_(
@ -5601,6 +5646,151 @@ def group_creating_from_src(group_id=None, group_snapshot_id=None):
return sql.exists([subq]).where(match_id)
@require_admin_context
def migrate_consistencygroups_to_groups(context, max_count, force=False):
    """Copy up to ``max_count`` CGs (and their cgsnapshots) to group tables.

    For each migrated CG: a Group row is created with the same id, its
    volumes are re-pointed at the group, its cgsnapshots are copied to
    GroupSnapshot rows (snapshots re-pointed likewise), and finally the
    original CG/cgsnapshot rows are deleted — all in one transaction per CG.

    :param context: admin request context
    :param max_count: maximum number of CG rows to process in this call
    :param force: when False, CGs whose status is not AVAILABLE, ERROR or
                  DELETING are skipped (counted but not migrated)
    :returns: tuple of (count_all, count_hit): rows examined vs. migrated
    """
    now = timeutils.utcnow()
    grps = model_query(context, models.Group)
    ids = [grp.id for grp in grps] if grps else []
    # NOTE(xyang): We are using the same IDs in the CG and Group tables.
    # This is because we are deleting the entry from the CG table after
    # migrating it to the Group table. Also when the user queries a CG id,
    # we will display it whether it is in the CG table or the Group table.
    # Without using the same IDs, we'll have to add a consistencygroup_id
    # column in the Group group to correlate it with the CG entry so we
    # know whether it has been migrated or not. It makes things more
    # complicated especially because the CG entry will be removed after
    # migration.
    query = (model_query(context, models.ConsistencyGroup).
             filter(models.ConsistencyGroup.id.notin_(ids)))
    cgs = query.limit(max_count)

    # Check if default group_type for migrating cgsnapshots exists
    result = (model_query(context, models.GroupTypes,
                          project_only=True).
              filter_by(name=group_types.DEFAULT_CGSNAPSHOT_TYPE).
              first())
    if not result:
        # The DB migration that seeds default_cgsnapshot_type must run first.
        msg = (_('Group type %s not found. Rerun migration script to create '
                 'the default cgsnapshot type.') %
               group_types.DEFAULT_CGSNAPSHOT_TYPE)
        raise exception.NotFound(msg)
    grp_type_id = result['id']

    count_all = 0
    count_hit = 0
    for cg in cgs.all():
        cg_ids = []
        cgsnapshot_ids = []
        volume_ids = []
        snapshot_ids = []
        # One transaction per CG so a failure leaves other CGs migrated.
        session = get_session()
        with session.begin():
            count_all += 1
            cgsnapshot_list = []
            vol_list = []

            # NOTE(dulek): We should avoid modifying consistency groups that
            # are in the middle of some operation.
            if not force:
                if cg.status not in (fields.ConsistencyGroupStatus.AVAILABLE,
                                     fields.ConsistencyGroupStatus.ERROR,
                                     fields.ConsistencyGroupStatus.DELETING):
                    continue

            # Migrate CG to group
            grp = model_query(context, models.Group,
                              session=session).filter_by(id=cg.id).first()
            if grp:
                # NOTE(xyang): This CG is already migrated to group.
                continue

            values = {'id': cg.id,
                      'created_at': now,
                      'updated_at': now,
                      'deleted': False,
                      'user_id': cg.user_id,
                      'project_id': cg.project_id,
                      'host': cg.host,
                      'cluster_name': cg.cluster_name,
                      'availability_zone': cg.availability_zone,
                      'name': cg.name,
                      'description': cg.description,
                      'group_type_id': grp_type_id,
                      'status': cg.status,
                      'group_snapshot_id': cg.cgsnapshot_id,
                      'source_group_id': cg.source_cgid,
                      }

            # Legacy CGs store volume types as a comma-terminated string
            # (e.g. 'a,b,'); expand it into mapping rows.
            # NOTE(review): assumes volume_type_id is never None here —
            # TODO confirm against CG creation paths.
            mappings = []
            for item in cg.volume_type_id.rstrip(',').split(','):
                mapping = models.GroupVolumeTypeMapping()
                mapping['volume_type_id'] = item
                mapping['group_id'] = cg.id
                mappings.append(mapping)

            values['volume_types'] = mappings

            grp = models.Group()
            grp.update(values)
            session.add(grp)
            cg_ids.append(cg.id)

            # Update group_id in volumes
            vol_list = (model_query(context, models.Volume,
                                    session=session).
                        filter_by(consistencygroup_id=cg.id).all())
            for vol in vol_list:
                vol.group_id = cg.id
                volume_ids.append(vol.id)

            # Migrate data from cgsnapshots to group_snapshots
            cgsnapshot_list = (model_query(context, models.Cgsnapshot,
                                           session=session).
                               filter_by(consistencygroup_id=cg.id).all())

            for cgsnap in cgsnapshot_list:
                grp_snap = (model_query(context, models.GroupSnapshot,
                                        session=session).
                            filter_by(id=cgsnap.id).first())
                if grp_snap:
                    # NOTE(xyang): This CGSnapshot is already migrated to
                    # group snapshot.
                    continue

                grp_snap = models.GroupSnapshot()
                values = {'id': cgsnap.id,
                          'created_at': now,
                          'updated_at': now,
                          'deleted': False,
                          'user_id': cgsnap.user_id,
                          'project_id': cgsnap.project_id,
                          'group_id': cg.id,
                          'name': cgsnap.name,
                          'description': cgsnap.description,
                          'group_type_id': grp_type_id,
                          'status': cgsnap.status, }
                grp_snap.update(values)
                session.add(grp_snap)
                cgsnapshot_ids.append(cgsnap.id)

                # Update group_snapshot_id in snapshots
                snap_list = (model_query(context, models.Snapshot,
                                         session=session).
                             filter_by(cgsnapshot_id=cgsnap.id).all())
                for snap in snap_list:
                    snap.group_snapshot_id = cgsnap.id
                    snapshot_ids.append(snap.id)

            # Delete entries in CG and CGSnapshot tables
            cg_cgsnapshot_destroy_all_by_ids(context, cg_ids, cgsnapshot_ids,
                                             volume_ids, snapshot_ids,
                                             session=session)
            count_hit += 1

    return count_all, count_hit
###############################
@ -5712,7 +5902,7 @@ def cgsnapshot_create(context, values):
cgsnapshot = model()
cgsnapshot.update(values)
session.add(cgsnapshot)
return _cgsnapshot_get(context, values['id'], session=session)
return _cgsnapshot_get(context, values['id'], session=session)
@require_context

View File

@ -0,0 +1,68 @@
# Copyright (c) 2016 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_utils import timeutils
import six
from sqlalchemy import MetaData, Table
from cinder.volume import group_types as volume_group_types
def upgrade(migrate_engine):
    """Ensure the default cgsnapshot group type and its spec exist.

    Creates the ``default_cgsnapshot_type`` group type (used when
    migrating consistency groups and cgsnapshots to generic groups) and
    its ``consistent_group_snapshot_enabled`` spec row, unless matching
    non-deleted rows already exist.  Safe to rerun (idempotent).

    :param migrate_engine: SQLAlchemy engine bound to the cinder database
    """
    meta = MetaData()
    meta.bind = migrate_engine
    now = timeutils.utcnow()

    group_types = Table('group_types', meta, autoload=True)
    group_type_specs = Table('group_type_specs', meta, autoload=True)

    # Create a default group_type for migrating cgsnapshots.
    # NOTE: combining two SQL column expressions with the python ``and``
    # operator evaluates truthiness in Python and yields only one clause,
    # silently dropping the ``deleted`` filter (and ``col is False`` is
    # always a plain Python False).  Chained .where() calls are ANDed
    # together server-side instead.
    results = list(group_types.select().
                   where(group_types.c.name ==
                         volume_group_types.DEFAULT_CGSNAPSHOT_TYPE).
                   where(group_types.c.deleted == False).  # noqa: E712
                   execute())

    if not results:
        grp_type_id = six.text_type(uuid.uuid4())
        group_type_dicts = {
            'id': grp_type_id,
            'name': volume_group_types.DEFAULT_CGSNAPSHOT_TYPE,
            'description': 'Default group type for migrating cgsnapshot',
            'created_at': now,
            'updated_at': now,
            'deleted': False,
            'is_public': True,
        }
        grp_type = group_types.insert()
        grp_type.execute(group_type_dicts)
    else:
        grp_type_id = results[0]['id']

    # Same idempotency check for the group type spec row.
    results = list(group_type_specs.select().
                   where(group_type_specs.c.group_type_id == grp_type_id).
                   where(group_type_specs.c.deleted == False).  # noqa: E712
                   execute())

    if not results:
        group_spec_dicts = {
            'key': 'consistent_group_snapshot_enabled',
            'value': '<is> True',
            'group_type_id': grp_type_id,
            'created_at': now,
            'updated_at': now,
            'deleted': False,
        }
        grp_spec = group_type_specs.insert()
        grp_spec.execute(group_spec_dicts)

View File

@ -110,6 +110,7 @@ class GroupTypesApiTest(test.TestCase):
self.type_id3 = self._create_group_type('group_type3',
{'key3': 'value3'}, False,
[fake.PROJECT_ID])
self.type_id0 = group_types.get_default_cgsnapshot_type()['id']
def test_group_types_index(self):
self.mock_object(group_types, 'get_all_group_types',
@ -160,7 +161,7 @@ class GroupTypesApiTest(test.TestCase):
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(2, len(res['group_types']))
self.assertEqual(3, len(res['group_types']))
def test_group_types_index_with_offset_out_of_range(self):
url = '/v3/%s/group_types?offset=424366766556787' % fake.PROJECT_ID
@ -198,10 +199,11 @@ class GroupTypesApiTest(test.TestCase):
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(3, len(res['group_types']))
self.assertEqual(4, len(res['group_types']))
self.assertEqual(self.type_id3, res['group_types'][0]['id'])
self.assertEqual(self.type_id2, res['group_types'][1]['id'])
self.assertEqual(self.type_id1, res['group_types'][2]['id'])
self.assertEqual(self.type_id0, res['group_types'][3]['id'])
def test_group_types_index_with_invalid_filter(self):
req = fakes.HTTPRequest.blank(
@ -210,7 +212,7 @@ class GroupTypesApiTest(test.TestCase):
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(3, len(res['group_types']))
self.assertEqual(4, len(res['group_types']))
def test_group_types_index_with_sort_keys(self):
req = fakes.HTTPRequest.blank('/v3/%s/group_types?sort=id' %
@ -218,13 +220,15 @@ class GroupTypesApiTest(test.TestCase):
version=GROUP_TYPE_MICRO_VERSION)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
expect_result = [self.type_id1, self.type_id2, self.type_id3]
expect_result = [self.type_id0, self.type_id1, self.type_id2,
self.type_id3]
expect_result.sort(reverse=True)
self.assertEqual(3, len(res['group_types']))
self.assertEqual(4, len(res['group_types']))
self.assertEqual(expect_result[0], res['group_types'][0]['id'])
self.assertEqual(expect_result[1], res['group_types'][1]['id'])
self.assertEqual(expect_result[2], res['group_types'][2]['id'])
self.assertEqual(expect_result[3], res['group_types'][3]['id'])
def test_group_types_index_with_sort_and_limit(self):
req = fakes.HTTPRequest.blank(
@ -232,7 +236,8 @@ class GroupTypesApiTest(test.TestCase):
version=GROUP_TYPE_MICRO_VERSION)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
expect_result = [self.type_id1, self.type_id2, self.type_id3]
expect_result = [self.type_id0, self.type_id1, self.type_id2,
self.type_id3]
expect_result.sort(reverse=True)
self.assertEqual(2, len(res['group_types']))
@ -245,13 +250,15 @@ class GroupTypesApiTest(test.TestCase):
version=GROUP_TYPE_MICRO_VERSION)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
expect_result = [self.type_id1, self.type_id2, self.type_id3]
expect_result = [self.type_id0, self.type_id1, self.type_id2,
self.type_id3]
expect_result.sort()
self.assertEqual(3, len(res['group_types']))
self.assertEqual(4, len(res['group_types']))
self.assertEqual(expect_result[0], res['group_types'][0]['id'])
self.assertEqual(expect_result[1], res['group_types'][1]['id'])
self.assertEqual(expect_result[2], res['group_types'][2]['id'])
self.assertEqual(expect_result[3], res['group_types'][3]['id'])
def test_group_types_show(self):
self.mock_object(group_types, 'get_group_type',

View File

@ -34,6 +34,7 @@ from cinder import quota
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils
from cinder.volume import group_types
CONF = cfg.CONF
THREE = 3
@ -1765,6 +1766,228 @@ class DBAPIConsistencygroupTestCase(BaseTest):
db_cgs[i].cluster_name)
class DBAPIMigrateCGstoGroupsTestCase(BaseTest):
    """Tests for cinder.db.api.migrate_consistencygroups_to_groups."""

    def setUp(self):
        """Build a fixture of CGs covering every migration path.

        CG ids and their purpose:
          '1' AVAILABLE, two volume types, 2 volumes, cgsnap1 with 2 snaps
          '2' ERROR, no volumes or snapshots
          '3' AVAILABLE, 1 volume, cgsnap3 with 1 snap
          '4' UPDATING -- only migrated when force=True
          '5' AVAILABLE, created from cgsnapshot cgsnap3
          '6' AVAILABLE, created from source CG '5'
        """
        super(DBAPIMigrateCGstoGroupsTestCase, self).setUp()

        db.volume_type_create(self.ctxt, {'id': 'a', 'name': 'a'})
        db.volume_type_create(self.ctxt, {'id': 'b', 'name': 'b'})

        # volume_type_id uses the legacy comma-terminated string format
        # that the migration splits on ','.
        cg_dicts = [
            {'id': '1', 'status': fields.ConsistencyGroupStatus.AVAILABLE,
             'volume_type_id': 'a,b,'},
            {'id': '2', 'status': fields.ConsistencyGroupStatus.ERROR,
             'volume_type_id': 'a,'},
            {'id': '3',
             'status': fields.ConsistencyGroupStatus.AVAILABLE,
             'volume_type_id': 'b,'},
            {'id': '4',
             'status': fields.ConsistencyGroupStatus.UPDATING,
             'volume_type_id': 'a,'},
        ]
        for cg_dict in cg_dicts:
            db.consistencygroup_create(self.ctxt, cg_dict)

        # Create volumes in CGs
        self.vol1 = db.volume_create(self.ctxt, {'volume_type_id': 'a',
                                                 'consistencygroup_id': '1',
                                                 'status': 'available',
                                                 'size': 1})
        self.vol2 = db.volume_create(self.ctxt, {'volume_type_id': 'b',
                                                 'consistencygroup_id': '1',
                                                 'status': 'available',
                                                 'size': 1})
        self.vol3 = db.volume_create(self.ctxt, {'volume_type_id': 'b',
                                                 'consistencygroup_id': '3',
                                                 'status': 'available',
                                                 'size': 1})

        # Create cgsnapshots
        cgsnap1 = db.cgsnapshot_create(
            self.ctxt,
            {'id': 'cgsnap1',
             'consistencygroup_id': '1',
             'status': fields.ConsistencyGroupStatus.AVAILABLE}
        )
        cgsnap3 = db.cgsnapshot_create(
            self.ctxt,
            {'id': 'cgsnap3',
             'consistencygroup_id': '3',
             'status': fields.ConsistencyGroupStatus.AVAILABLE}
        )

        # Create snapshots
        self.snap1 = db.snapshot_create(
            self.ctxt,
            {'volume_id': self.vol1['id'],
             'cgsnapshot_id': cgsnap1['id'],
             'status': fields.SnapshotStatus.AVAILABLE})
        self.snap2 = db.snapshot_create(
            self.ctxt,
            {'volume_id': self.vol2['id'],
             'cgsnapshot_id': cgsnap1['id'],
             'status': fields.SnapshotStatus.AVAILABLE})
        self.snap3 = db.snapshot_create(
            self.ctxt,
            {'volume_id': self.vol3['id'],
             'cgsnapshot_id': cgsnap3['id'],
             'status': fields.SnapshotStatus.AVAILABLE})

        # Create CG from CG snapshot
        cg5_dict = {
            'id': '5',
            'cgsnapshot_id': cgsnap3['id'],
            'status': fields.ConsistencyGroupStatus.AVAILABLE,
            'volume_type_id': 'b,'
        }
        db.consistencygroup_create(self.ctxt, cg5_dict)
        cg_dicts.append(cg5_dict)
        self.vol5 = db.volume_create(self.ctxt, {'volume_type_id': 'b',
                                                 'consistencygroup_id': '5',
                                                 'status': 'available',
                                                 'size': 1})

        # Create CG from source CG
        cg6_dict = {
            'id': '6',
            'source_cgid': '5',
            'status': fields.ConsistencyGroupStatus.AVAILABLE,
            'volume_type_id': 'b,'
        }
        db.consistencygroup_create(self.ctxt, cg6_dict)
        cg_dicts.append(cg6_dict)
        self.vol6 = db.volume_create(self.ctxt, {'volume_type_id': 'b',
                                                 'consistencygroup_id': '6',
                                                 'status': 'available',
                                                 'size': 1})

        self.addCleanup(self._cleanup)

    def _cleanup(self):
        """Remove fixture rows from both the legacy and the group tables.

        Tests move data between the CG and group tables, so cleanup must
        destroy the rows wherever they ended up.
        """
        db.snapshot_destroy(self.ctxt, self.snap1.id)
        db.snapshot_destroy(self.ctxt, self.snap2.id)
        db.snapshot_destroy(self.ctxt, self.snap3.id)

        db.volume_destroy(self.ctxt, self.vol1.id)
        db.volume_destroy(self.ctxt, self.vol2.id)
        db.volume_destroy(self.ctxt, self.vol3.id)
        db.volume_destroy(self.ctxt, self.vol5.id)
        db.volume_destroy(self.ctxt, self.vol6.id)

        db.cgsnapshot_destroy(self.ctxt, 'cgsnap1')
        db.cgsnapshot_destroy(self.ctxt, 'cgsnap3')
        db.group_snapshot_destroy(self.ctxt, 'cgsnap1')
        db.group_snapshot_destroy(self.ctxt, 'cgsnap3')

        db.consistencygroup_destroy(self.ctxt, '1')
        db.consistencygroup_destroy(self.ctxt, '2')
        db.consistencygroup_destroy(self.ctxt, '3')
        db.consistencygroup_destroy(self.ctxt, '4')
        db.consistencygroup_destroy(self.ctxt, '5')
        db.consistencygroup_destroy(self.ctxt, '6')
        db.group_destroy(self.ctxt, '1')
        db.group_destroy(self.ctxt, '2')
        db.group_destroy(self.ctxt, '3')
        db.group_destroy(self.ctxt, '4')
        db.group_destroy(self.ctxt, '5')
        db.group_destroy(self.ctxt, '6')

        db.volume_type_destroy(self.ctxt, 'a')
        db.volume_type_destroy(self.ctxt, 'b')

        # NOTE(review): the migration seeds DEFAULT_CGSNAPSHOT_TYPE, but
        # this looks up the CONF default group type instead -- presumably
        # this should be get_default_cgsnapshot_type(); confirm.
        grp_type = group_types.get_default_group_type()
        if grp_type:
            db.group_type_destroy(self.ctxt, grp_type.id)

    def _assert_migrated(self, migrated, not_migrated):
        """Verify which CGs were (not) moved to the group tables.

        :param migrated: dict of cg_id -> cgsnapshot id (or None) expected
                         to now live in the group/group_snapshot tables
        :param not_migrated: iterable of cg_ids expected to still be absent
                             from the group table
        """
        for cg_id, cgsnap_id in migrated.items():
            grp = db.group_get(self.ctxt, cg_id)
            self.assertIsNotNone(grp)
            vols_in_cgs = db.volume_get_all_by_group(self.ctxt, cg_id)
            vols_in_grps = db.volume_get_all_by_generic_group(self.ctxt, cg_id)
            # Volumes must have been detached from the legacy CG ...
            self.assertEqual(0, len(vols_in_cgs))
            # ... and re-attached to the new group.
            if cg_id == '1':
                self.assertEqual(2, len(vols_in_grps))
            elif cg_id == '3':
                self.assertEqual(1, len(vols_in_grps))
            if cgsnap_id:
                # Same check for snapshots: moved from cgsnapshot to
                # group snapshot.
                grp_snap = db.group_snapshot_get(self.ctxt, cgsnap_id)
                self.assertIsNotNone(grp_snap)
                snaps_in_cgsnaps = db.snapshot_get_all_for_cgsnapshot(
                    self.ctxt, cgsnap_id)
                snaps_in_grpsnaps = db.snapshot_get_all_for_group_snapshot(
                    self.ctxt, cgsnap_id)
                self.assertEqual(0, len(snaps_in_cgsnaps))
                if cg_id == '1':
                    self.assertEqual(2, len(snaps_in_grpsnaps))
                elif cg_id == '3':
                    self.assertEqual(1, len(snaps_in_grpsnaps))

        for cg_id in not_migrated:
            self.assertRaises(exception.GroupNotFound,
                              db.group_get, self.ctxt, cg_id)

    def test_migrate(self):
        """Default run: UPDATING CG '4' is counted but skipped."""
        # Run migration
        count_all, count_hit = db.migrate_consistencygroups_to_groups(
            self.ctxt, 50)
        # Check counted entries
        self.assertEqual(6, count_all)
        self.assertEqual(5, count_hit)
        # Check migrated
        migrated = {'1': 'cgsnap1', '2': None, '3': 'cgsnap3',
                    '5': None, '6': None}
        not_migrated = ('4',)
        self._assert_migrated(migrated, not_migrated)

    def test_migrate_force(self):
        """force=True migrates every CG regardless of status."""
        # Run migration
        count_all, count_hit = db.migrate_consistencygroups_to_groups(
            self.ctxt, 50, True)
        # Check counted entries
        self.assertEqual(6, count_all)
        self.assertEqual(6, count_hit)
        # Check migrated
        migrated = {'1': 'cgsnap1', '2': None, '3': 'cgsnap3', '4': None,
                    '5': None, '6': None}
        self._assert_migrated(migrated, ())

    def test_migrate_limit_force(self):
        """max_count bounds each pass; repeated runs finish the job."""
        # Run first migration
        count_all, count_hit = db.migrate_consistencygroups_to_groups(
            self.ctxt, 2, True)
        # Check counted entries
        self.assertEqual(2, count_all)
        self.assertEqual(2, count_hit)
        # Check migrated
        migrated = {'1': 'cgsnap1', '2': None}
        not_migrated = ('3', '4', '5', '6',)
        self._assert_migrated(migrated, not_migrated)

        # Run second migration
        count_all, count_hit = db.migrate_consistencygroups_to_groups(
            self.ctxt, 4, True)
        # Check counted entries
        self.assertEqual(4, count_all)
        self.assertEqual(4, count_hit)
        # Check migrated
        migrated = {'1': 'cgsnap1', '2': None, '3': 'cgsnap3', '4': None,
                    '5': None, '6': None}
        self._assert_migrated(migrated, ())
class DBAPICgsnapshotTestCase(BaseTest):
"""Tests for cinder.db.api.cgsnapshot_*."""

View File

@ -32,6 +32,7 @@ import sqlalchemy
from cinder.db import migration
import cinder.db.sqlalchemy.migrate_repo
from cinder.volume import group_types as volume_group_types
class MigrationsMixin(test_migrations.WalkVersionsMixin):
@ -1044,6 +1045,24 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(groups.c.source_group_id.type,
self.VARCHAR_TYPE)
def _check_086(self, engine, data):
"""Test inserting default cgsnapshot group type."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_types"))
group_types = db_utils.get_table(engine, 'group_types')
t1 = (group_types.select(group_types.c.name ==
volume_group_types.DEFAULT_CGSNAPSHOT_TYPE).
execute().first())
self.assertIsNotNone(t1)
group_specs = db_utils.get_table(engine, 'group_type_specs')
specs = group_specs.select(
group_specs.c.group_type_id == t1.id and
group_specs.c.key == 'consistent_group_snapshot_enabled'
).execute().first()
self.assertIsNotNone(specs)
self.assertEqual('<is> True', specs.value)
def test_walk_versions(self):
self.walk_versions(False, False)

View File

@ -26,6 +26,7 @@ from cinder.i18n import _, _LE
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
DEFAULT_CGSNAPSHOT_TYPE = "default_cgsnapshot_type"
def create(context,
@ -135,6 +136,27 @@ def get_default_group_type():
return grp_type
def get_default_cgsnapshot_type():
    """Get the default group type for migrating cgsnapshots.

    Get the default group type for migrating consistencygroups to
    groups and cgsnapshots to group_snapshots.

    :returns: the group type, or {} if DEFAULT_CGSNAPSHOT_TYPE does not
              exist yet (the caller is expected to handle that case)
    """
    grp_type = {}
    ctxt = context.get_admin_context()
    try:
        grp_type = get_group_type_by_name(ctxt, DEFAULT_CGSNAPSHOT_TYPE)
    except exception.GroupTypeNotFoundByName:
        # Couldn't find DEFAULT_CGSNAPSHOT_TYPE group type.
        # Record this issue and move on.
        # NOTE: pass the argument to the logger for delayed interpolation
        # instead of eagerly %-formatting the message.
        LOG.exception(_LE('Default cgsnapshot type %s is not found.'),
                      DEFAULT_CGSNAPSHOT_TYPE)

    return grp_type
def get_group_type_specs(group_type_id, key=False):
group_type = get_group_type(context.get_admin_context(),
group_type_id)

View File

@ -0,0 +1,5 @@
---
upgrade:
  - The operator needs to run `cinder-manage db
    online_data_migrations` to migrate existing consistency
    groups to generic volume groups.