Browse Source

Remove unused CG code

There is CG-related code in the API, scheduler, and manager that is
no longer invoked in Pike. This is because we will force users to
migrate all existing CGs and CGsnapshots to the new generic volume
groups tables when they upgrade to Pike. The CG CLI and API are still
supported in Pike; they will be redirected to create/modify
entries in the generic volume groups tables instead.

Database and versioned-object-related code is still kept for now
because there are still drivers referencing it.

Change-Id: Ieba87c6725f07564fd5a69674602eb3ca6200db3
changes/18/446018/7
xing-yang 5 years ago
parent
commit
c979bdac87
  1. 65
      cinder/api/contrib/cgsnapshots.py
  2. 90
      cinder/api/contrib/consistencygroups.py
  3. 16
      cinder/api/v2/volumes.py
  4. 21
      cinder/api/v3/volumes.py
  5. 27
      cinder/consistencygroup/__init__.py
  6. 790
      cinder/consistencygroup/api.py
  7. 18
      cinder/scheduler/driver.py
  8. 135
      cinder/scheduler/filter_scheduler.py
  9. 22
      cinder/scheduler/manager.py
  10. 15
      cinder/scheduler/rpcapi.py
  11. 286
      cinder/tests/unit/api/contrib/test_cgsnapshots.py
  12. 485
      cinder/tests/unit/api/contrib/test_consistencygroups.py
  13. 2
      cinder/tests/unit/api/v2/fakes.py
  14. 7
      cinder/tests/unit/api/v2/test_volumes.py
  15. 12
      cinder/tests/unit/api/v3/test_consistencygroups.py
  16. 778
      cinder/tests/unit/consistencygroup/test_cg.py
  17. 2
      cinder/tests/unit/scheduler/fakes.py
  18. 116
      cinder/tests/unit/scheduler/test_filter_scheduler.py
  19. 9
      cinder/tests/unit/scheduler/test_rpcapi.py
  20. 41
      cinder/tests/unit/scheduler/test_scheduler.py
  21. 74
      cinder/tests/unit/volume/test_rpcapi.py
  22. 694
      cinder/volume/manager.py
  23. 37
      cinder/volume/rpcapi.py

65
cinder/api/contrib/cgsnapshots.py

@ -25,14 +25,9 @@ from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import cgsnapshots as cgsnapshot_views
from cinder import consistencygroup as consistencygroup_api
from cinder import exception
from cinder import group as group_api
from cinder.i18n import _
from cinder.objects import cgsnapshot as cgsnap_obj
from cinder.objects import consistencygroup as cg_obj
from cinder.objects import group as grp_obj
from cinder.objects import group_snapshot as grpsnap_obj
LOG = logging.getLogger(__name__)
@ -43,7 +38,6 @@ class CgsnapshotsController(wsgi.Controller):
_view_builder_class = cgsnapshot_views.ViewBuilder
def __init__(self):
self.cgsnapshot_api = consistencygroup_api.API()
self.group_snapshot_api = group_api.API()
super(CgsnapshotsController, self).__init__()
@ -66,20 +60,11 @@ class CgsnapshotsController(wsgi.Controller):
try:
cgsnapshot = self._get_cgsnapshot(context, id)
if isinstance(cgsnapshot, cgsnap_obj.CGSnapshot):
self.cgsnapshot_api.delete_cgsnapshot(context, cgsnapshot)
elif isinstance(cgsnapshot, grpsnap_obj.GroupSnapshot):
self.group_snapshot_api.delete_group_snapshot(
context, cgsnapshot)
else:
msg = _("Group snapshot '%s' not found.") % id
raise exc.HTTPNotFound(explanation=msg)
except (exception.CgSnapshotNotFound,
exception.GroupSnapshotNotFound):
self.group_snapshot_api.delete_group_snapshot(context, cgsnapshot)
except exception.GroupSnapshotNotFound:
# Not found exception will be handled at the wsgi level
raise
except (exception.InvalidCgSnapshot,
exception.InvalidGroupSnapshot) as e:
except exception.InvalidGroupSnapshot as e:
raise exc.HTTPBadRequest(explanation=six.text_type(e))
except Exception:
msg = _("Failed cgsnapshot")
@ -97,53 +82,32 @@ class CgsnapshotsController(wsgi.Controller):
def _get_cg(self, context, id):
# Not found exception will be handled at the wsgi level
try:
consistencygroup = self.cgsnapshot_api.get(
context,
group_id=id)
except exception.ConsistencyGroupNotFound:
consistencygroup = self.group_snapshot_api.get(
context, group_id=id)
consistencygroup = self.group_snapshot_api.get(context, group_id=id)
return consistencygroup
def _get_cgsnapshot(self, context, id):
# Not found exception will be handled at the wsgi level
try:
cgsnapshot = self.cgsnapshot_api.get_cgsnapshot(
context,
cgsnapshot_id=id)
except exception.CgSnapshotNotFound:
cgsnapshot = self.group_snapshot_api.get_group_snapshot(
context,
group_snapshot_id=id)
cgsnapshot = self.group_snapshot_api.get_group_snapshot(
context, group_snapshot_id=id)
return cgsnapshot
def _get_cgsnapshots(self, req, is_detail):
"""Returns a list of cgsnapshots, transformed through view builder."""
context = req.environ['cinder.context']
cgsnapshots = self.cgsnapshot_api.get_all_cgsnapshots(context)
cgsnap_limited_list = common.limited(cgsnapshots, req)
grp_snapshots = self.group_snapshot_api.get_all_group_snapshots(
context)
grpsnap_limited_list = common.limited(grp_snapshots, req)
if is_detail:
cgsnapshots = self._view_builder.detail_list(
req, cgsnap_limited_list)
grp_snapshots = self._view_builder.detail_list(
req, grpsnap_limited_list)
else:
cgsnapshots = self._view_builder.summary_list(
req, cgsnap_limited_list)
grp_snapshots = self._view_builder.summary_list(
req, grpsnap_limited_list)
cgsnapshots['cgsnapshots'] = (cgsnapshots['cgsnapshots'] +
grp_snapshots['cgsnapshots'])
return cgsnapshots
return grp_snapshots
@wsgi.response(http_client.ACCEPTED)
def create(self, req, body):
@ -172,19 +136,10 @@ class CgsnapshotsController(wsgi.Controller):
context=context)
try:
if isinstance(group, cg_obj.ConsistencyGroup):
new_cgsnapshot = self.cgsnapshot_api.create_cgsnapshot(
context, group, name, description)
elif isinstance(group, grp_obj.Group):
new_cgsnapshot = self.group_snapshot_api.create_group_snapshot(
context, group, name, description)
else:
msg = _("Group %s not found.") % group.id
raise exc.HTTPNotFound(explanation=msg)
new_cgsnapshot = self.group_snapshot_api.create_group_snapshot(
context, group, name, description)
# Not found exception will be handled at the wsgi level
except (exception.InvalidCgSnapshot,
exception.InvalidConsistencyGroup,
exception.InvalidGroup,
except (exception.InvalidGroup,
exception.InvalidGroupSnapshot,
exception.InvalidVolume) as error:
raise exc.HTTPBadRequest(explanation=error.msg)

90
cinder/api/contrib/consistencygroups.py

@ -25,14 +25,10 @@ from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import consistencygroups as consistencygroup_views
from cinder import consistencygroup as consistencygroup_api
from cinder.consistencygroup import api as consistencygroup_api
from cinder import exception
from cinder import group as group_api
from cinder.i18n import _
from cinder.objects import cgsnapshot as cgsnap_obj
from cinder.objects import consistencygroup as cg_obj
from cinder.objects import group as grp_obj
from cinder.objects import group_snapshot as grpsnap_obj
from cinder.volume import group_types
LOG = logging.getLogger(__name__)
@ -44,7 +40,6 @@ class ConsistencyGroupsController(wsgi.Controller):
_view_builder_class = consistencygroup_views.ViewBuilder
def __init__(self):
self.consistencygroup_api = consistencygroup_api.API()
self.group_api = group_api.API()
super(ConsistencyGroupsController, self).__init__()
@ -81,14 +76,8 @@ class ConsistencyGroupsController(wsgi.Controller):
try:
group = self._get(context, id)
if isinstance(group, cg_obj.ConsistencyGroup):
self.consistencygroup_api.delete(context, group, force)
elif isinstance(group, grp_obj.Group):
consistencygroup_api.api.check_policy(context, 'delete')
self.group_api.delete(context, group, force)
else:
msg = _("Group '%s' not found.") % id
raise exc.HTTPNotFound(explanation=msg)
consistencygroup_api.check_policy(context, 'delete')
self.group_api.delete(context, group, force)
# Not found exception will be handled at the wsgi level
except exception.InvalidConsistencyGroup as error:
raise exc.HTTPBadRequest(explanation=error.msg)
@ -105,25 +94,15 @@ class ConsistencyGroupsController(wsgi.Controller):
def _get(self, context, id):
# Not found exception will be handled at the wsgi level
try:
consistencygroup = self.consistencygroup_api.get(
context,
group_id=id)
except exception.ConsistencyGroupNotFound:
consistencygroup = self.group_api.get(context, group_id=id)
consistencygroup = self.group_api.get(context, group_id=id)
return consistencygroup
def _get_cgsnapshot(self, context, id):
# Not found exception will be handled at the wsgi level
try:
cgsnapshot = self.consistencygroup_api.get_cgsnapshot(
context,
cgsnapshot_id=id)
except exception.CgSnapshotNotFound:
cgsnapshot = self.group_api.get_group_snapshot(
context,
group_snapshot_id=id)
cgsnapshot = self.group_api.get_group_snapshot(
context,
group_snapshot_id=id)
return cgsnapshot
@ -134,31 +113,19 @@ class ConsistencyGroupsController(wsgi.Controller):
# make another copy of filters, since it is being modified in
# consistencygroup_api while getting consistencygroups
group_filters = req.params.copy()
marker, limit, offset = common.get_pagination_params(filters)
sort_keys, sort_dirs = common.get_sort_params(filters)
consistencygroups = self.consistencygroup_api.get_all(
context, filters=filters, marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
groups = self.group_api.get_all(
context, filters=group_filters, marker=marker, limit=limit,
context, filters=filters, marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
if is_detail:
consistencygroups = self._view_builder.detail_list(
req, consistencygroups)
groups = self._view_builder.detail_list(req, groups)
else:
consistencygroups = self._view_builder.summary_list(
req, consistencygroups)
groups = self._view_builder.summary_list(req, groups)
consistencygroups['consistencygroups'] = (
consistencygroups['consistencygroups'] +
groups['consistencygroups'])
return consistencygroups
return groups
@wsgi.response(http_client.ACCEPTED)
def create(self, req, body):
@ -189,7 +156,7 @@ class ConsistencyGroupsController(wsgi.Controller):
{'name': name})
try:
consistencygroup_api.api.check_policy(context, 'create')
consistencygroup_api.check_policy(context, 'create')
new_consistencygroup = self.group_api.create(
context, name, description, group_type['id'], volume_types,
availability_zone=availability_zone)
@ -245,28 +212,13 @@ class ConsistencyGroupsController(wsgi.Controller):
{'name': name, 'source_cgid': source_cgid})
try:
src_grp = None
src_snap = None
if source_cgid:
src_grp = self._get(context, source_cgid)
self._get(context, source_cgid)
if cgsnapshot_id:
src_snap = self._get_cgsnapshot(context, cgsnapshot_id)
if (isinstance(src_grp, cg_obj.ConsistencyGroup) or
isinstance(src_snap, cgsnap_obj.CGSnapshot)):
new_group = self.consistencygroup_api.create_from_src(
context, name, description, cgsnapshot_id, source_cgid)
elif (isinstance(src_grp, grp_obj.Group) or
isinstance(src_snap, grpsnap_obj.GroupSnapshot)):
consistencygroup_api.api.check_policy(context, 'create')
new_group = self.group_api.create_from_src(
context, name, description, cgsnapshot_id, source_cgid)
else:
msg = (_("Source CGSnapshot %(cgsnap)s or source CG %(cg)s "
"not found.") % {'cgsnap': cgsnapshot_id,
'cg': source_cgid})
raise exc.HTTPNotFound(explanation=msg)
except exception.InvalidConsistencyGroup as error:
raise exc.HTTPBadRequest(explanation=error.msg)
self._get_cgsnapshot(context, cgsnapshot_id)
consistencygroup_api.check_policy(context, 'create')
new_group = self.group_api.create_from_src(
context, name, description, cgsnapshot_id, source_cgid)
except exception.NotFound:
# Not found exception will be handled at the wsgi level
raise
@ -296,16 +248,8 @@ class ConsistencyGroupsController(wsgi.Controller):
'remove_volumes': remove_volumes})
group = self._get(context, id)
if isinstance(group, cg_obj.ConsistencyGroup):
self.consistencygroup_api.update(context, group, name, description,
add_volumes, remove_volumes,
allow_empty)
elif isinstance(group, grp_obj.Group):
self.group_api.update(context, group, name, description,
add_volumes, remove_volumes)
else:
msg = _("Group '%s' not found.") % id
raise exc.HTTPNotFound(explanation=msg)
self.group_api.update(context, group, name, description,
add_volumes, remove_volumes)
def update(self, req, id, body):
"""Update the consistency group.

16
cinder/api/v2/volumes.py

@ -26,7 +26,6 @@ from webob import exc
from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api.v2.views import volumes as volume_views
from cinder import consistencygroup as consistencygroupAPI
from cinder import exception
from cinder import group as group_api
from cinder.i18n import _
@ -48,7 +47,6 @@ class VolumeController(wsgi.Controller):
def __init__(self, ext_mgr):
self.volume_api = cinder_volume.API()
self.consistencygroup_api = consistencygroupAPI.API()
self.group_api = group_api.API()
self.ext_mgr = ext_mgr
super(VolumeController, self).__init__()
@ -237,18 +235,12 @@ class VolumeController(wsgi.Controller):
else:
kwargs['source_replica'] = None
kwargs['group'] = None
kwargs['consistencygroup'] = None
consistencygroup_id = volume.get('consistencygroup_id')
if consistencygroup_id is not None:
try:
kwargs['consistencygroup'] = (
self.consistencygroup_api.get(context,
consistencygroup_id))
except exception.ConsistencyGroupNotFound:
# Not found exception will be handled at the wsgi level
kwargs['group'] = self.group_api.get(
context, consistencygroup_id)
else:
kwargs['consistencygroup'] = None
# Not found exception will be handled at the wsgi level
kwargs['group'] = self.group_api.get(context, consistencygroup_id)
size = volume.get('size', None)
if size is None and kwargs['snapshot'] is not None:

21
cinder/api/v3/volumes.py

@ -23,7 +23,6 @@ from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api.v2 import volumes as volumes_v2
from cinder.api.v3.views import volumes as volume_views_v3
from cinder import exception
from cinder import group as group_api
from cinder.i18n import _
from cinder import objects
@ -235,26 +234,18 @@ class VolumeController(volumes_v2.VolumeController):
else:
kwargs['source_replica'] = None
kwargs['group'] = None
kwargs['consistencygroup'] = None
consistencygroup_id = volume.get('consistencygroup_id')
if consistencygroup_id is not None:
try:
kwargs['consistencygroup'] = (
self.consistencygroup_api.get(context,
consistencygroup_id))
except exception.ConsistencyGroupNotFound:
# Not found exception will be handled at the wsgi level
kwargs['group'] = self.group_api.get(
context, consistencygroup_id)
else:
kwargs['consistencygroup'] = None
# Not found exception will be handled at the wsgi level
kwargs['group'] = self.group_api.get(context, consistencygroup_id)
# Get group_id if volume is in a group.
group_id = volume.get('group_id')
if group_id is not None:
try:
kwargs['group'] = self.group_api.get(context, group_id)
except exception.GroupNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
# Not found exception will be handled at the wsgi level
kwargs['group'] = self.group_api.get(context, group_id)
size = volume.get('size', None)
if size is None and kwargs['snapshot'] is not None:

27
cinder/consistencygroup/__init__.py

@ -1,27 +0,0 @@
# Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from cinder.transfer import <foo>' elsewhere.
from oslo_utils import importutils
from cinder.common import config
CONF = config.CONF
API = importutils.import_class(
CONF.consistencygroup_api_class)

790
cinder/consistencygroup/api.py

@ -17,57 +17,7 @@
Handles all requests relating to consistency groups.
"""
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from cinder import db
from cinder.db import base
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields as c_fields
import cinder.policy
from cinder import quota
from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder.volume import api as volume_api
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
CGQUOTAS = quota.CGQUOTAS
QUOTAS = quota.QUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution.
This decorator requires the first 3 args of the wrapped function
to be (self, context, consistencygroup)
"""
@functools.wraps(func)
def wrapped(self, context, target_obj, *args, **kwargs):
check_policy(context, func.__name__, target_obj)
return func(self, context, target_obj, *args, **kwargs)
return wrapped
def check_policy(context, action, target_obj=None):
@ -79,743 +29,3 @@ def check_policy(context, action, target_obj=None):
target.update(target_obj)
_action = 'consistencygroup:%s' % action
cinder.policy.enforce(context, _action, target)
class API(base.Base):
"""API for interacting with the volume manager for consistency groups."""
def __init__(self, db_driver=None):
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
self.availability_zone_names = ()
self.volume_api = volume_api.API()
super(API, self).__init__(db_driver)
def _valid_availability_zone(self, availability_zone):
if availability_zone in self.availability_zone_names:
return True
if CONF.storage_availability_zone == availability_zone:
return True
azs = self.volume_api.list_availability_zones()
self.availability_zone_names = [az['name'] for az in azs]
return availability_zone in self.availability_zone_names
def _extract_availability_zone(self, availability_zone):
if availability_zone is None:
if CONF.default_availability_zone:
availability_zone = CONF.default_availability_zone
else:
# For backwards compatibility use the storage_availability_zone
availability_zone = CONF.storage_availability_zone
valid = self._valid_availability_zone(availability_zone)
if not valid:
msg = _("Availability zone '%s' is invalid.") % availability_zone
LOG.warning(msg)
raise exception.InvalidInput(reason=msg)
return availability_zone
def create(self, context, name, description,
cg_volume_types, availability_zone=None):
check_policy(context, 'create')
volume_type_list = cg_volume_types.split(',')
# NOTE: Admin context is required to get extra_specs of volume_types.
req_volume_types = (self.db.volume_types_get_by_name_or_id(
context.elevated(), volume_type_list))
req_volume_type_ids = ""
for voltype in req_volume_types:
req_volume_type_ids = (
req_volume_type_ids + voltype.get('id') + ",")
if len(req_volume_type_ids) == 0:
req_volume_type_ids = None
availability_zone = self._extract_availability_zone(availability_zone)
kwargs = {'user_id': context.user_id,
'project_id': context.project_id,
'availability_zone': availability_zone,
'status': c_fields.ConsistencyGroupStatus.CREATING,
'name': name,
'description': description,
'volume_type_id': req_volume_type_ids}
group = None
try:
group = objects.ConsistencyGroup(context=context, **kwargs)
group.create()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error occurred when creating consistency group "
"%s.", name)
request_spec_list = []
filter_properties_list = []
for req_volume_type in req_volume_types:
request_spec = {'volume_type': req_volume_type.copy(),
'consistencygroup_id': group.id}
filter_properties = {}
request_spec_list.append(request_spec)
filter_properties_list.append(filter_properties)
# Update quota for consistencygroups
self.update_quota(context, group, 1)
self._cast_create_consistencygroup(context, group,
request_spec_list,
filter_properties_list)
return group
def create_from_src(self, context, name, description=None,
cgsnapshot_id=None, source_cgid=None):
check_policy(context, 'create')
kwargs = {
'user_id': context.user_id,
'project_id': context.project_id,
'status': c_fields.ConsistencyGroupStatus.CREATING,
'name': name,
'description': description,
'cgsnapshot_id': cgsnapshot_id,
'source_cgid': source_cgid,
}
group = None
try:
group = objects.ConsistencyGroup(context=context, **kwargs)
group.create(cg_snap_id=cgsnapshot_id, cg_id=source_cgid)
except exception.ConsistencyGroupNotFound:
with excutils.save_and_reraise_exception():
LOG.error("Source CG %(source_cg)s not found when "
"creating consistency group %(cg)s from "
"source.",
{'cg': name, 'source_cg': source_cgid})
except exception.CgSnapshotNotFound:
with excutils.save_and_reraise_exception():
LOG.error("CG snapshot %(cgsnap)s not found when creating "
"consistency group %(cg)s from source.",
{'cg': name, 'cgsnap': cgsnapshot_id})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error occurred when creating consistency group"
" %(cg)s from cgsnapshot %(cgsnap)s.",
{'cg': name, 'cgsnap': cgsnapshot_id})
# Update quota for consistencygroups
self.update_quota(context, group, 1)
if not group.host:
msg = _("No host to create consistency group %s.") % group.id
LOG.error(msg)
raise exception.InvalidConsistencyGroup(reason=msg)
group.assert_not_frozen()
if cgsnapshot_id:
self._create_cg_from_cgsnapshot(context, group, cgsnapshot_id)
elif source_cgid:
self._create_cg_from_source_cg(context, group, source_cgid)
return group
def _create_cg_from_cgsnapshot(self, context, group, cgsnapshot_id):
try:
cgsnapshot = objects.CGSnapshot.get_by_id(context, cgsnapshot_id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
if not snapshots:
msg = _("Cgsnahost is empty. No consistency group "
"will be created.")
raise exception.InvalidConsistencyGroup(reason=msg)
try:
values = {'volumes': len(snapshots)}
QUOTAS.limit_check(context, project_id=context.project_id,
**values)
except exception.OverQuota as e:
group.destroy()
quotas = e.kwargs['quotas']
raise exception.VolumeLimitExceeded(
allowed=e.kwargs['overs'], limit=quotas['volumes'])
for snapshot in snapshots:
kwargs = {}
kwargs['availability_zone'] = group.availability_zone
kwargs['cgsnapshot'] = cgsnapshot
kwargs['consistencygroup'] = group
kwargs['snapshot'] = snapshot
volume_type_id = snapshot.volume_type_id
if volume_type_id:
kwargs['volume_type'] = (
objects.VolumeType.get_by_name_or_id(
context, volume_type_id))
# Since cgsnapshot is passed in, the following call will
# create a db entry for the volume, but will not call the
# volume manager to create a real volume in the backend yet.
# If error happens, taskflow will handle rollback of quota
# and removal of volume entry in the db.
try:
self.volume_api.create(context,
snapshot.volume_size,
None,
None,
**kwargs)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.error("Error occurred when creating volume "
"entry from snapshot in the process of "
"creating consistency group %(group)s "
"from cgsnapshot %(cgsnap)s.",
{'group': group.id,
'cgsnap': cgsnapshot.id})
except Exception:
with excutils.save_and_reraise_exception():
try:
new_vols = self.db.volume_get_all_by_group(context,
group.id)
for vol in new_vols:
self.volume_api.delete(context, vol, force=True)
group.destroy()
finally:
LOG.error("Error occurred when creating consistency "
"group %(group)s from cgsnapshot "
"%(cgsnap)s.",
{'group': group.id,
'cgsnap': cgsnapshot.id})
volumes = self.db.volume_get_all_by_group(context,
group.id)
for vol in volumes:
# Update the host field for the volume.
self.db.volume_update(context, vol['id'],
{'host': group.get('host')})
self.volume_rpcapi.create_consistencygroup_from_src(
context, group, cgsnapshot)
def _create_cg_from_source_cg(self, context, group, source_cgid):
try:
source_cg = objects.ConsistencyGroup.get_by_id(context,
source_cgid)
source_vols = self.db.volume_get_all_by_group(context,
source_cg.id)
if not source_vols:
msg = _("Source CG is empty. No consistency group "
"will be created.")
raise exception.InvalidConsistencyGroup(reason=msg)
try:
values = {'volumes': len(source_vols)}
QUOTAS.limit_check(context, project_id=context.project_id,
**values)
except exception.OverQuota as e:
group.destroy()
quotas = e.kwargs['quotas']
raise exception.VolumeLimitExceeded(
allowed=e.kwargs['overs'], limit=quotas['volumes'])
for source_vol in source_vols:
kwargs = {}
kwargs['availability_zone'] = group.availability_zone
kwargs['source_cg'] = source_cg
kwargs['consistencygroup'] = group
kwargs['source_volume'] = source_vol
volume_type_id = source_vol.get('volume_type_id')
if volume_type_id:
kwargs['volume_type'] = (
objects.VolumeType.get_by_name_or_id(
context, volume_type_id))
# Since source_cg is passed in, the following call will
# create a db entry for the volume, but will not call the
# volume manager to create a real volume in the backend yet.
# If error happens, taskflow will handle rollback of quota
# and removal of volume entry in the db.
try:
self.volume_api.create(context,
source_vol['size'],
None,
None,
**kwargs)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.error("Error occurred when creating cloned "
"volume in the process of creating "
"consistency group %(group)s from "
"source CG %(source_cg)s.",
{'group': group.id,
'source_cg': source_cg.id})
except Exception:
with excutils.save_and_reraise_exception():
try:
new_vols = self.db.volume_get_all_by_group(context,
group.id)
for vol in new_vols:
self.volume_api.delete(context, vol, force=True)
group.destroy()
finally:
LOG.error("Error occurred when creating consistency "
"group %(group)s from source CG "
"%(source_cg)s.",
{'group': group.id,
'source_cg': source_cg.id})
volumes = self.db.volume_get_all_by_group(context,
group.id)
for vol in volumes:
# Update the host field for the volume.
self.db.volume_update(context, vol['id'],
{'host': group.host})
self.volume_rpcapi.create_consistencygroup_from_src(context, group,
None, source_cg)
def _cast_create_consistencygroup(self, context, group,
request_spec_list,
filter_properties_list):
try:
for request_spec in request_spec_list:
volume_type = request_spec.get('volume_type', None)
volume_type_id = None
if volume_type:
volume_type_id = volume_type.get('id', None)
specs = {}
if volume_type_id:
qos_specs = volume_types.get_volume_type_qos_specs(
volume_type_id)
specs = qos_specs['qos_specs']
if not specs:
# to make sure we don't pass empty dict
specs = None
volume_properties = {
'size': 0, # Need to populate size for the scheduler
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'attach_status': c_fields.VolumeAttachStatus.DETACHED,
'encryption_key_id': request_spec.get('encryption_key_id',
None),
'display_description': request_spec.get('description',
None),
'display_name': request_spec.get('name', None),
'volume_type_id': volume_type_id,
}
request_spec['volume_properties'] = volume_properties
request_spec['qos_specs'] = specs
except Exception:
with excutils.save_and_reraise_exception():
try:
group.destroy()
finally:
LOG.error("Error occurred when building "
"request spec list for consistency group "
"%s.", group.id)
# Cast to the scheduler and let it handle whatever is needed
# to select the target host for this group.
self.scheduler_rpcapi.create_consistencygroup(
context,
group,
request_spec_list=request_spec_list,
filter_properties_list=filter_properties_list)
def update_quota(self, context, group, num, project_id=None):
reserve_opts = {'consistencygroups': num}
try:
reservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
if reservations:
CGQUOTAS.commit(context, reservations)
except Exception as e:
with excutils.save_and_reraise_exception():
try:
group.destroy()
if isinstance(e, exception.OverQuota):
quota_utils.process_reserve_over_quota(
context, e, resource='groups')
finally:
LOG.error("Failed to update quota for "
"consistency group %s.", group.id)
@wrap_check_policy
def delete(self, context, group, force=False):
if not group.host:
self.update_quota(context, group, -1, group.project_id)
LOG.debug("No host for consistency group %s. Deleting from "
"the database.", group.id)
group.destroy()
return
group.assert_not_frozen()
if force:
expected = {}
else:
expected = {'status': (c_fields.ConsistencyGroupStatus.AVAILABLE,
c_fields.ConsistencyGroupStatus.ERROR)}
filters = [~db.cg_has_cgsnapshot_filter(),
~db.cg_has_volumes_filter(attached_or_with_snapshots=force),
~db.cg_creating_from_src(cg_id=group.id)]
values = {'status': c_fields.ConsistencyGroupStatus.DELETING}
if not group.conditional_update(values, expected, filters):
if force:
reason = _('Consistency group must not have attached volumes, '
'volumes with snapshots, or dependent cgsnapshots')
else:
reason = _('Consistency group status must be available or '
'error and must not have volumes or dependent '
'cgsnapshots')
msg = (_('Cannot delete consistency group %(id)s. %(reason)s, and '
'it cannot be the source for an ongoing CG or CG '
'Snapshot creation.')
% {'id': group.id, 'reason': reason})
raise exception.InvalidConsistencyGroup(reason=msg)
self.volume_rpcapi.delete_consistencygroup(context, group)
def _check_update(self, group, name, description, add_volumes,
remove_volumes, allow_empty=False):
if allow_empty:
if (name is None and description is None
and not add_volumes and not remove_volumes):
msg = (_("Cannot update consistency group %(group_id)s "
"because no valid name, description, add_volumes, "
"or remove_volumes were provided.") %
{'group_id': group.id})
raise exception.InvalidConsistencyGroup(reason=msg)
else:
if not (name or description or add_volumes or remove_volumes):
msg = (_("Cannot update consistency group %(group_id)s "
"because no valid name, description, add_volumes, "
"or remove_volumes were provided.") %
{'group_id': group.id})
raise exception.InvalidConsistencyGroup(reason=msg)
def update(self, context, group, name, description,
add_volumes, remove_volumes, allow_empty=False):
"""Update consistency group."""
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes = add_volumes.strip(',')
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes = remove_volumes.strip(',')
remove_volumes_list = remove_volumes.split(',')
invalid_uuids = []
for uuid in add_volumes_list:
if uuid in remove_volumes_list:
invalid_uuids.append(uuid)
if invalid_uuids:
msg = _("UUIDs %s are in both add and remove volume "
"list.") % invalid_uuids
raise exception.InvalidVolume(reason=msg)
# Validate name.
if name == group.name:
name = None
# Validate description.
if description == group.description:
description = None
self._check_update(group, name, description, add_volumes,
remove_volumes, allow_empty)
fields = {'updated_at': timeutils.utcnow()}
# Update name and description in db now. No need to
# to send them over through an RPC call.
if allow_empty:
if name is not None:
fields['name'] = name
if description is not None:
fields['description'] = description
else:
if name:
fields['name'] = name
if description:
fields['description'] = description
# NOTE(geguileo): We will use the updating status in the CG as a lock
# mechanism to prevent volume add/remove races with other API, while we
# figure out if we really need to add or remove volumes.
if add_volumes or remove_volumes:
fields['status'] = c_fields.ConsistencyGroupStatus.UPDATING
# We cannot modify the members of this CG if the CG is being used
# to create another CG or a CGsnapshot is being created
filters = [~db.cg_creating_from_src(cg_id=group.id),
~db.cgsnapshot_creating_from_src()]
else:
filters = []
expected = {'status': c_fields.ConsistencyGroupStatus.AVAILABLE}
if not group.conditional_update(fields, expected, filters):
msg = _("Cannot update consistency group %s, status must be "
"available, and it cannot be the source for an ongoing "
"CG or CG Snapshot creation.") % group.id
raise exception.InvalidConsistencyGroup(reason=msg)
# Now the CG is "locked" for updating
try:
# Validate volumes in add_volumes and remove_volumes.
add_volumes_new = self._validate_add_volumes(
context, group.volumes, add_volumes_list, group)
remove_volumes_new = self._validate_remove_volumes(
group.volumes, remove_volumes_list, group)
self._check_update(group, name, description, add_volumes_new,
remove_volumes_new, allow_empty)
except Exception:
# If we have an error on the volume_lists we must return status to
# available as we were doing before removing API races
with excutils.save_and_reraise_exception():
group.status = c_fields.ConsistencyGroupStatus.AVAILABLE
group.save()
# Do an RPC call only if the update request includes
# adding/removing volumes. add_volumes_new and remove_volumes_new
# are strings of volume UUIDs separated by commas with no spaces
# in between.
if add_volumes_new or remove_volumes_new:
self.volume_rpcapi.update_consistencygroup(
context, group,
add_volumes=add_volumes_new,
remove_volumes=remove_volumes_new)
# If there are no new volumes to add or remove and we had changed
# the status to updating, turn it back to available
elif group.status == c_fields.ConsistencyGroupStatus.UPDATING:
group.status = c_fields.ConsistencyGroupStatus.AVAILABLE
group.save()
def _validate_remove_volumes(self, volumes, remove_volumes_list, group):
# Validate volumes in remove_volumes.
if not remove_volumes_list:
return None
remove_volumes_new = ""
for volume in volumes:
if volume['id'] in remove_volumes_list:
if volume['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
msg = (_("Cannot remove volume %(volume_id)s from "
"consistency group %(group_id)s because volume "
"is in an invalid state: %(status)s. Valid "
"states are: %(valid)s.") %
{'volume_id': volume['id'],
'group_id': group.id,
'status': volume['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# Volume currently in CG. It will be removed from CG.
if remove_volumes_new:
remove_volumes_new += ","
remove_volumes_new += volume['id']
for rem_vol in remove_volumes_list:
if rem_vol not in remove_volumes_new:
msg = (_("Cannot remove volume %(volume_id)s from "
"consistency group %(group_id)s because it "
"is not in the group.") %
{'volume_id': rem_vol,
'group_id': group.id})
raise exception.InvalidVolume(reason=msg)
return remove_volumes_new
def _validate_add_volumes(self, context, volumes, add_volumes_list, group):
if not add_volumes_list:
return None
add_volumes_new = ""
for volume in volumes:
if volume['id'] in add_volumes_list:
# Volume already in CG. Remove from add_volumes.
add_volumes_list.remove(volume['id'])
for add_vol in add_volumes_list:
try:
add_vol_ref = self.db.volume_get(context, add_vol)
except exception.VolumeNotFound:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume cannot be "
"found.") %
{'volume_id': add_vol,
'group_id': group.id})
raise exception.InvalidVolume(reason=msg)
orig_group = add_vol_ref.get('consistencygroup_id', None)
if orig_group:
# If volume to be added is already in the group to be updated,
# it should have been removed from the add_volumes_list in the
# beginning of this function. If we are here, it means it is
# in a different group.
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because it is already in "
"consistency group %(orig_group)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'orig_group': orig_group})
raise exception.InvalidVolume(reason=msg)
if add_vol_ref:
add_vol_type_id = add_vol_ref.get('volume_type_id', None)
if not add_vol_type_id:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because it has no volume "
"type.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id})
raise exception.InvalidVolume(reason=msg)
if add_vol_type_id not in group.volume_type_id:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume type "
"%(volume_type)s is not supported by the "
"group.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'volume_type': add_vol_type_id})
raise exception.InvalidVolume(reason=msg)
if (add_vol_ref['status'] not in
VALID_ADD_VOL_TO_CG_STATUS):
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an "
"invalid state: %(status)s. Valid states are: "
"%(valid)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'status': add_vol_ref['status'],
'valid': VALID_ADD_VOL_TO_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# group.host and add_vol_ref['host'] are in this format:
# 'host@backend#pool'. Extract host (host@backend) before
# doing comparison.
vol_host = vol_utils.extract_host(add_vol_ref['host'])
group_host = vol_utils.extract_host(group.host)
if group_host != vol_host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
# Volume exists. It will be added to CG.
if add_volumes_new:
add_volumes_new += ","
add_volumes_new += add_vol_ref['id']
else:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume does not exist.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id})
raise exception.InvalidVolume(reason=msg)
return add_volumes_new
def get(self, context, group_id):
    """Fetch a single consistency group, enforcing the 'get' policy."""
    consistencygroup = objects.ConsistencyGroup.get_by_id(context, group_id)
    check_policy(context, 'get', consistencygroup)
    return consistencygroup
def get_all(self, context, filters=None, marker=None, limit=None,
            offset=None, sort_keys=None, sort_dirs=None):
    """List consistency groups visible to the caller.

    An admin supplying the 'all_tenants' filter sees groups from every
    project; all other callers only see their own project's groups.
    """
    check_policy(context, 'get_all')
    filters = filters or {}
    if filters:
        LOG.debug("Searching by: %s", filters)

    if context.is_admin and 'all_tenants' in filters:
        # 'all_tenants' is a pseudo-filter; strip it before querying.
        del filters['all_tenants']
        return objects.ConsistencyGroupList.get_all(
            context, filters=filters, marker=marker, limit=limit,
            offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)

    return objects.ConsistencyGroupList.get_all_by_project(
        context, context.project_id, filters=filters, marker=marker,
        limit=limit, offset=offset, sort_keys=sort_keys,
        sort_dirs=sort_dirs)
def create_cgsnapshot(self, context, group, name, description):
    """Create a snapshot of every volume in a consistency group.

    Creates the CGSnapshot DB object and the per-volume snapshot DB
    entries, then asks the volume manager over RPC to take the actual
    snapshots. On any failure the CGSnapshot record is rolled back
    before the exception is re-raised.

    :param group: the consistency group to snapshot (must not be frozen)
    :returns: the new CGSnapshot object in 'creating' status
    """
    group.assert_not_frozen()
    options = {'consistencygroup_id': group.id,
               'user_id': context.user_id,
               'project_id': context.project_id,
               'status': "creating",
               'name': name,
               'description': description}
    cgsnapshot = None
    cgsnapshot_id = None
    try:
        cgsnapshot = objects.CGSnapshot(context, **options)
        cgsnapshot.create()
        cgsnapshot_id = cgsnapshot.id

        snap_name = cgsnapshot.name
        snap_desc = cgsnapshot.description
        with group.obj_as_admin():
            self.volume_api.create_snapshots_in_db(
                context, group.volumes, snap_name, snap_desc,
                cgsnapshot_id)
    except Exception:
        with excutils.save_and_reraise_exception():
            try:
                # Roll back the cgsnapshot record if it was created.
                # Guard against cgsnapshot still being None: the object
                # constructor itself may have raised, and calling
                # obj_attr_is_set on None would mask the original error.
                if (cgsnapshot is not None and
                        cgsnapshot.obj_attr_is_set('id')):
                    cgsnapshot.destroy()
            finally:
                LOG.error("Error occurred when creating cgsnapshot"
                          " %s.", cgsnapshot_id)

    self.volume_rpcapi.create_cgsnapshot(context, cgsnapshot)

    return cgsnapshot
def delete_cgsnapshot(self, context, cgsnapshot, force=False):
    """Mark a cgsnapshot for deletion and notify the volume manager.

    The status flip to 'deleting' is done as a conditional update so a
    snapshot currently used as the source of a new CG cannot be deleted.
    """
    cgsnapshot.assert_not_frozen()
    updates = {'status': 'deleting'}
    allowed_states = {'status': ('available', 'error')}
    not_in_use = [~db.cg_creating_from_src(cgsnapshot_id=cgsnapshot.id)]
    if not cgsnapshot.conditional_update(updates, allowed_states,
                                         not_in_use):
        msg = _('CgSnapshot status must be available or error, and no CG '
                'can be currently using it as source for its creation.')
        raise exception.InvalidCgSnapshot(reason=msg)
    self.volume_rpcapi.delete_cgsnapshot(context.elevated(), cgsnapshot)
def update_cgsnapshot(self, context, cgsnapshot, fields):
    """Apply *fields* to the cgsnapshot and persist the change."""
    cgsnapshot.update(fields)
    cgsnapshot.save()
def get_cgsnapshot(self, context, cgsnapshot_id):
    """Return the cgsnapshot with the given id, policy permitting."""
    check_policy(context, 'get_cgsnapshot')
    return objects.CGSnapshot.get_by_id(context, cgsnapshot_id)
def get_all_cgsnapshots(self, context, search_opts=None):
    """List cgsnapshots visible to the caller.

    Admin callers passing 'all_tenants' see snapshots from every
    project; everyone else is scoped to their own project.
    """
    check_policy(context, 'get_all_cgsnapshots')
    opts = search_opts or {}

    if context.is_admin and 'all_tenants' in opts:
        # Need to remove all_tenants to pass the filtering below.
        del opts['all_tenants']
        return objects.CGSnapshotList.get_all(context, opts)

    return objects.CGSnapshotList.get_all_by_project(
        context.elevated(), context.project_id, opts)

18
cinder/scheduler/driver.py

@ -57,17 +57,6 @@ def volume_update_db(context, volume_id, host, cluster_name):
return volume
def group_update_db(context, group, host, cluster_name):
"""Set the host and the scheduled_at field of a consistencygroup.
:returns: A Consistencygroup with the updated fields set properly.
"""
group.update({'host': host, 'updated_at': timeutils.utcnow(),
'cluster_name': cluster_name})
group.save()
return group
def generic_group_update_db(context, group, host, cluster_name):
"""Set the host and the scheduled_at field of a group.
@ -141,13 +130,6 @@ class Scheduler(object):
"""Must override schedule method for scheduler to work."""
raise NotImplementedError(_("Must implement schedule_create_volume"))
def schedule_create_consistencygroup(self, context, group,
request_spec_list,
filter_properties_list):
"""Must override schedule method for scheduler to work."""
raise NotImplementedError(_(
"Must implement schedule_create_consistencygroup"))
def schedule_create_group(self, context, group,
group_spec,
request_spec_list,

135
cinder/scheduler/filter_scheduler.py

@ -62,25 +62,6 @@ class FilterScheduler(driver.Scheduler):
filter_properties['metadata'] = vol.get('metadata')
filter_properties['qos_specs'] = vol.get('qos_specs')
def schedule_create_consistencygroup(self, context, group,
request_spec_list,
filter_properties_list):
weighed_backend = self._schedule_group(
context,
request_spec_list,
filter_properties_list)
if not weighed_backend:
raise exception.NoValidBackend(reason=_("No weighed backends "
"available"))
backend = weighed_backend.obj
updated_group = driver.group_update_db(context, group, backend.host,
backend.cluster_name)
self.volume_rpcapi.create_consistencygroup(context, updated_group)
def schedule_create_group(self, context, group,
group_spec,
request_spec_list,
@ -350,98 +331,6 @@ class FilterScheduler(driver.Scheduler):
backends, filter_properties)
return weighed_backends
def _get_weighted_candidates_group(self, context, request_spec_list,
filter_properties_list=None):
"""Finds hosts that supports the consistencygroup.
Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
weighed_backends = []
index = 0
for request_spec in request_spec_list:
volume_properties = request_spec['volume_properties']
# Since Cinder is using mixed filters from Oslo and it's own, which
# takes 'resource_XX' and 'volume_XX' as input respectively,
# copying 'volume_XX' to 'resource_XX' will make both filters
# happy.
resource_properties = volume_properties.copy()
volume_type = request_spec.get("volume_type", None)
resource_type = request_spec.get("volume_type", None)
request_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()
filter_properties = {}
if filter_properties_list:
filter_properties = filter_properties_list[index]
if filter_properties is None:
filter_properties = {}
self._populate_retry(filter_properties, resource_properties)
# Add consistencygroup_support in extra_specs if it is not there.
# Make sure it is populated in filter_properties
if 'consistencygroup_support' not in resource_type.get(
'extra_specs', {}):
resource_type['extra_specs'].update(
consistencygroup_support='<is> True')
filter_properties.update({'context': context,
'request_spec': request_spec,
'config_options': config_options,
'volume_type': volume_type,
'resource_type': resource_type})
self.populate_filter_properties(request_spec,
filter_properties)
# Find our local list of acceptable backends by filtering and
# weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
all_backends = self.host_manager.get_all_backend_states(elevated)
if not all_backends:
return []
# Filter local backends based on requirements ...
backends = self.host_manager.get_filtered_backends(
all_backends, filter_properties)
if not backends:
return []
LOG.debug("Filtered %s", backends)
# weighted_host = WeightedHost() ... the best
# host for the job.
temp_weighed_backends = self.host_manager.get_weighed_backends(
backends,
filter_properties)
if not temp_weighed_backends:
return []
if index == 0:
weighed_backends = temp_weighed_backends
else:
new_weighed_backends = []
for backend1 in weighed_backends:
for backend2 in temp_weighed_backends:
# Should schedule creation of CG on backend level,
# not pool level.
if (utils.extract_host(backend1.obj.backend_id) ==
utils.extract_host(backend2.obj.backend_id)):
new_weighed_backends.append(backend1)
weighed_backends = new_weighed_backends
if not weighed_backends:
return []
index += 1
return weighed_backends
def _get_weighted_candidates_generic_group(
self, context, group_spec, request_spec_list,
group_filter_properties=None,
@ -618,11 +507,8 @@ class FilterScheduler(driver.Scheduler):
weighed_backends = self._get_weighted_candidates(context, request_spec,
filter_properties)
# When we get the weighed_backends, we clear those backends that don't
# match the consistencygroup's backend.
if request_spec.get('CG_backend'):
group_backend = request_spec.get('CG_backend')
else:
group_backend = request_spec.get('group_backend')
# match the group's backend.
group_backend = request_spec.get('group_backend')
if weighed_backends and group_backend:
# Get host name including host@backend#pool info from
# weighed_backends.
@ -637,17 +523,6 @@ class FilterScheduler(driver.Scheduler):
return None
return self._choose_top_backend(weighed_backends, request_spec)
def _schedule_group(self, context, request_spec_list,
filter_properties_list=None):
weighed_backends = self._get_weighted_candidates_group(
context,
request_spec_list,
filter_properties_list)
if not weighed_backends:
return None
return self._choose_top_backend_group(weighed_backends,
request_spec_list)
def _schedule_generic_group(self, context, group_spec, request_spec_list,
group_filter_properties=None,
filter_properties_list=None):
@ -669,12 +544,6 @@ class FilterScheduler(driver.Scheduler):
backend_state.consume_from_volume(volume_properties)
return top_backend
def _choose_top_backend_group(self, weighed_backends, request_spec_list):
top_backend = weighed_backends[0]
backend_state = top_backend.obj
LOG.debug("Choosing %s", backend_state.backend_id)
return top_backend
def _choose_top_backend_generic_group(self, weighed_backends):
top_backend = weighed_backends[0]
backend_state = top_backend.obj

22
cinder/scheduler/manager.py

@ -132,28 +132,6 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
while self._startup_delay and not self.driver.is_ready():
eventlet.sleep(1)
def create_consistencygroup(self, context, group, request_spec_list=None,
filter_properties_list=None):
self._wait_for_scheduler()
try:
self.driver.schedule_create_consistencygroup(
context, group,
request_spec_list,
filter_properties_list)
except exception.NoValidBackend:
LOG.error("Could not find a backend for consistency group "
"%(group_id)s.",
{'group_id': group.id})
group.status = 'error'
group.save()
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Failed to create consistency group "
"%(group_id)s.",
{'group_id': group.id})
group.status = 'error'
group.save()
def create_group(self, context