Remove unused CG code
There is CG-related code in the api, scheduler, and manager layers that is no longer invoked in Pike. This is because we will force users to migrate all existing CGs and CGsnapshots to the new generic volume groups tables when they upgrade to Pike. The CG CLI and API are still supported in Pike; they will be redirected to create/modify entries in the generic volume groups tables instead. Database and versioned-object related code is still kept for now because there are still drivers referencing it. Change-Id: Ieba87c6725f07564fd5a69674602eb3ca6200db3
This commit is contained in:
parent
5f66f158bf
commit
c979bdac87
@ -25,14 +25,9 @@ from cinder.api import common
|
||||
from cinder.api import extensions
|
||||
from cinder.api.openstack import wsgi
|
||||
from cinder.api.views import cgsnapshots as cgsnapshot_views
|
||||
from cinder import consistencygroup as consistencygroup_api
|
||||
from cinder import exception
|
||||
from cinder import group as group_api
|
||||
from cinder.i18n import _
|
||||
from cinder.objects import cgsnapshot as cgsnap_obj
|
||||
from cinder.objects import consistencygroup as cg_obj
|
||||
from cinder.objects import group as grp_obj
|
||||
from cinder.objects import group_snapshot as grpsnap_obj
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
@ -43,7 +38,6 @@ class CgsnapshotsController(wsgi.Controller):
|
||||
_view_builder_class = cgsnapshot_views.ViewBuilder
|
||||
|
||||
def __init__(self):
|
||||
self.cgsnapshot_api = consistencygroup_api.API()
|
||||
self.group_snapshot_api = group_api.API()
|
||||
super(CgsnapshotsController, self).__init__()
|
||||
|
||||
@ -66,20 +60,11 @@ class CgsnapshotsController(wsgi.Controller):
|
||||
|
||||
try:
|
||||
cgsnapshot = self._get_cgsnapshot(context, id)
|
||||
if isinstance(cgsnapshot, cgsnap_obj.CGSnapshot):
|
||||
self.cgsnapshot_api.delete_cgsnapshot(context, cgsnapshot)
|
||||
elif isinstance(cgsnapshot, grpsnap_obj.GroupSnapshot):
|
||||
self.group_snapshot_api.delete_group_snapshot(
|
||||
context, cgsnapshot)
|
||||
else:
|
||||
msg = _("Group snapshot '%s' not found.") % id
|
||||
raise exc.HTTPNotFound(explanation=msg)
|
||||
except (exception.CgSnapshotNotFound,
|
||||
exception.GroupSnapshotNotFound):
|
||||
self.group_snapshot_api.delete_group_snapshot(context, cgsnapshot)
|
||||
except exception.GroupSnapshotNotFound:
|
||||
# Not found exception will be handled at the wsgi level
|
||||
raise
|
||||
except (exception.InvalidCgSnapshot,
|
||||
exception.InvalidGroupSnapshot) as e:
|
||||
except exception.InvalidGroupSnapshot as e:
|
||||
raise exc.HTTPBadRequest(explanation=six.text_type(e))
|
||||
except Exception:
|
||||
msg = _("Failed cgsnapshot")
|
||||
@ -97,53 +82,32 @@ class CgsnapshotsController(wsgi.Controller):
|
||||
|
||||
def _get_cg(self, context, id):
|
||||
# Not found exception will be handled at the wsgi level
|
||||
try:
|
||||
consistencygroup = self.cgsnapshot_api.get(
|
||||
context,
|
||||
group_id=id)
|
||||
except exception.ConsistencyGroupNotFound:
|
||||
consistencygroup = self.group_snapshot_api.get(
|
||||
context, group_id=id)
|
||||
consistencygroup = self.group_snapshot_api.get(context, group_id=id)
|
||||
|
||||
return consistencygroup
|
||||
|
||||
def _get_cgsnapshot(self, context, id):
|
||||
# Not found exception will be handled at the wsgi level
|
||||
try:
|
||||
cgsnapshot = self.cgsnapshot_api.get_cgsnapshot(
|
||||
context,
|
||||
cgsnapshot_id=id)
|
||||
except exception.CgSnapshotNotFound:
|
||||
cgsnapshot = self.group_snapshot_api.get_group_snapshot(
|
||||
context,
|
||||
group_snapshot_id=id)
|
||||
cgsnapshot = self.group_snapshot_api.get_group_snapshot(
|
||||
context, group_snapshot_id=id)
|
||||
|
||||
return cgsnapshot
|
||||
|
||||
def _get_cgsnapshots(self, req, is_detail):
|
||||
"""Returns a list of cgsnapshots, transformed through view builder."""
|
||||
context = req.environ['cinder.context']
|
||||
cgsnapshots = self.cgsnapshot_api.get_all_cgsnapshots(context)
|
||||
cgsnap_limited_list = common.limited(cgsnapshots, req)
|
||||
grp_snapshots = self.group_snapshot_api.get_all_group_snapshots(
|
||||
context)
|
||||
grpsnap_limited_list = common.limited(grp_snapshots, req)
|
||||
|
||||
if is_detail:
|
||||
cgsnapshots = self._view_builder.detail_list(
|
||||
req, cgsnap_limited_list)
|
||||
grp_snapshots = self._view_builder.detail_list(
|
||||
req, grpsnap_limited_list)
|
||||
else:
|
||||
cgsnapshots = self._view_builder.summary_list(
|
||||
req, cgsnap_limited_list)
|
||||
grp_snapshots = self._view_builder.summary_list(
|
||||
req, grpsnap_limited_list)
|
||||
|
||||
cgsnapshots['cgsnapshots'] = (cgsnapshots['cgsnapshots'] +
|
||||
grp_snapshots['cgsnapshots'])
|
||||
|
||||
return cgsnapshots
|
||||
return grp_snapshots
|
||||
|
||||
@wsgi.response(http_client.ACCEPTED)
|
||||
def create(self, req, body):
|
||||
@ -172,19 +136,10 @@ class CgsnapshotsController(wsgi.Controller):
|
||||
context=context)
|
||||
|
||||
try:
|
||||
if isinstance(group, cg_obj.ConsistencyGroup):
|
||||
new_cgsnapshot = self.cgsnapshot_api.create_cgsnapshot(
|
||||
context, group, name, description)
|
||||
elif isinstance(group, grp_obj.Group):
|
||||
new_cgsnapshot = self.group_snapshot_api.create_group_snapshot(
|
||||
context, group, name, description)
|
||||
else:
|
||||
msg = _("Group %s not found.") % group.id
|
||||
raise exc.HTTPNotFound(explanation=msg)
|
||||
new_cgsnapshot = self.group_snapshot_api.create_group_snapshot(
|
||||
context, group, name, description)
|
||||
# Not found exception will be handled at the wsgi level
|
||||
except (exception.InvalidCgSnapshot,
|
||||
exception.InvalidConsistencyGroup,
|
||||
exception.InvalidGroup,
|
||||
except (exception.InvalidGroup,
|
||||
exception.InvalidGroupSnapshot,
|
||||
exception.InvalidVolume) as error:
|
||||
raise exc.HTTPBadRequest(explanation=error.msg)
|
||||
|
@ -25,14 +25,10 @@ from cinder.api import common
|
||||
from cinder.api import extensions
|
||||
from cinder.api.openstack import wsgi
|
||||
from cinder.api.views import consistencygroups as consistencygroup_views
|
||||
from cinder import consistencygroup as consistencygroup_api
|
||||
from cinder.consistencygroup import api as consistencygroup_api
|
||||
from cinder import exception
|
||||
from cinder import group as group_api
|
||||
from cinder.i18n import _
|
||||
from cinder.objects import cgsnapshot as cgsnap_obj
|
||||
from cinder.objects import consistencygroup as cg_obj
|
||||
from cinder.objects import group as grp_obj
|
||||
from cinder.objects import group_snapshot as grpsnap_obj
|
||||
from cinder.volume import group_types
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
@ -44,7 +40,6 @@ class ConsistencyGroupsController(wsgi.Controller):
|
||||
_view_builder_class = consistencygroup_views.ViewBuilder
|
||||
|
||||
def __init__(self):
|
||||
self.consistencygroup_api = consistencygroup_api.API()
|
||||
self.group_api = group_api.API()
|
||||
super(ConsistencyGroupsController, self).__init__()
|
||||
|
||||
@ -81,14 +76,8 @@ class ConsistencyGroupsController(wsgi.Controller):
|
||||
|
||||
try:
|
||||
group = self._get(context, id)
|
||||
if isinstance(group, cg_obj.ConsistencyGroup):
|
||||
self.consistencygroup_api.delete(context, group, force)
|
||||
elif isinstance(group, grp_obj.Group):
|
||||
consistencygroup_api.api.check_policy(context, 'delete')
|
||||
self.group_api.delete(context, group, force)
|
||||
else:
|
||||
msg = _("Group '%s' not found.") % id
|
||||
raise exc.HTTPNotFound(explanation=msg)
|
||||
consistencygroup_api.check_policy(context, 'delete')
|
||||
self.group_api.delete(context, group, force)
|
||||
# Not found exception will be handled at the wsgi level
|
||||
except exception.InvalidConsistencyGroup as error:
|
||||
raise exc.HTTPBadRequest(explanation=error.msg)
|
||||
@ -105,25 +94,15 @@ class ConsistencyGroupsController(wsgi.Controller):
|
||||
|
||||
def _get(self, context, id):
|
||||
# Not found exception will be handled at the wsgi level
|
||||
try:
|
||||
consistencygroup = self.consistencygroup_api.get(
|
||||
context,
|
||||
group_id=id)
|
||||
except exception.ConsistencyGroupNotFound:
|
||||
consistencygroup = self.group_api.get(context, group_id=id)
|
||||
consistencygroup = self.group_api.get(context, group_id=id)
|
||||
|
||||
return consistencygroup
|
||||
|
||||
def _get_cgsnapshot(self, context, id):
|
||||
# Not found exception will be handled at the wsgi level
|
||||
try:
|
||||
cgsnapshot = self.consistencygroup_api.get_cgsnapshot(
|
||||
context,
|
||||
cgsnapshot_id=id)
|
||||
except exception.CgSnapshotNotFound:
|
||||
cgsnapshot = self.group_api.get_group_snapshot(
|
||||
context,
|
||||
group_snapshot_id=id)
|
||||
cgsnapshot = self.group_api.get_group_snapshot(
|
||||
context,
|
||||
group_snapshot_id=id)
|
||||
|
||||
return cgsnapshot
|
||||
|
||||
@ -134,31 +113,19 @@ class ConsistencyGroupsController(wsgi.Controller):
|
||||
|
||||
# make another copy of filters, since it is being modified in
|
||||
# consistencygroup_api while getting consistencygroups
|
||||
group_filters = req.params.copy()
|
||||
marker, limit, offset = common.get_pagination_params(filters)
|
||||
sort_keys, sort_dirs = common.get_sort_params(filters)
|
||||
|
||||
consistencygroups = self.consistencygroup_api.get_all(
|
||||
groups = self.group_api.get_all(
|
||||
context, filters=filters, marker=marker, limit=limit,
|
||||
offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
|
||||
|
||||
groups = self.group_api.get_all(
|
||||
context, filters=group_filters, marker=marker, limit=limit,
|
||||
offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
|
||||
|
||||
if is_detail:
|
||||
consistencygroups = self._view_builder.detail_list(
|
||||
req, consistencygroups)
|
||||
groups = self._view_builder.detail_list(req, groups)
|
||||
else:
|
||||
consistencygroups = self._view_builder.summary_list(
|
||||
req, consistencygroups)
|
||||
groups = self._view_builder.summary_list(req, groups)
|
||||
|
||||
consistencygroups['consistencygroups'] = (
|
||||
consistencygroups['consistencygroups'] +
|
||||
groups['consistencygroups'])
|
||||
return consistencygroups
|
||||
return groups
|
||||
|
||||
@wsgi.response(http_client.ACCEPTED)
|
||||
def create(self, req, body):
|
||||
@ -189,7 +156,7 @@ class ConsistencyGroupsController(wsgi.Controller):
|
||||
{'name': name})
|
||||
|
||||
try:
|
||||
consistencygroup_api.api.check_policy(context, 'create')
|
||||
consistencygroup_api.check_policy(context, 'create')
|
||||
new_consistencygroup = self.group_api.create(
|
||||
context, name, description, group_type['id'], volume_types,
|
||||
availability_zone=availability_zone)
|
||||
@ -245,28 +212,13 @@ class ConsistencyGroupsController(wsgi.Controller):
|
||||
{'name': name, 'source_cgid': source_cgid})
|
||||
|
||||
try:
|
||||
src_grp = None
|
||||
src_snap = None
|
||||
if source_cgid:
|
||||
src_grp = self._get(context, source_cgid)
|
||||
self._get(context, source_cgid)
|
||||
if cgsnapshot_id:
|
||||
src_snap = self._get_cgsnapshot(context, cgsnapshot_id)
|
||||
if (isinstance(src_grp, cg_obj.ConsistencyGroup) or
|
||||
isinstance(src_snap, cgsnap_obj.CGSnapshot)):
|
||||
new_group = self.consistencygroup_api.create_from_src(
|
||||
context, name, description, cgsnapshot_id, source_cgid)
|
||||
elif (isinstance(src_grp, grp_obj.Group) or
|
||||
isinstance(src_snap, grpsnap_obj.GroupSnapshot)):
|
||||
consistencygroup_api.api.check_policy(context, 'create')
|
||||
new_group = self.group_api.create_from_src(
|
||||
context, name, description, cgsnapshot_id, source_cgid)
|
||||
else:
|
||||
msg = (_("Source CGSnapshot %(cgsnap)s or source CG %(cg)s "
|
||||
"not found.") % {'cgsnap': cgsnapshot_id,
|
||||
'cg': source_cgid})
|
||||
raise exc.HTTPNotFound(explanation=msg)
|
||||
except exception.InvalidConsistencyGroup as error:
|
||||
raise exc.HTTPBadRequest(explanation=error.msg)
|
||||
self._get_cgsnapshot(context, cgsnapshot_id)
|
||||
consistencygroup_api.check_policy(context, 'create')
|
||||
new_group = self.group_api.create_from_src(
|
||||
context, name, description, cgsnapshot_id, source_cgid)
|
||||
except exception.NotFound:
|
||||
# Not found exception will be handled at the wsgi level
|
||||
raise
|
||||
@ -296,16 +248,8 @@ class ConsistencyGroupsController(wsgi.Controller):
|
||||
'remove_volumes': remove_volumes})
|
||||
|
||||
group = self._get(context, id)
|
||||
if isinstance(group, cg_obj.ConsistencyGroup):
|
||||
self.consistencygroup_api.update(context, group, name, description,
|
||||
add_volumes, remove_volumes,
|
||||
allow_empty)
|
||||
elif isinstance(group, grp_obj.Group):
|
||||
self.group_api.update(context, group, name, description,
|
||||
add_volumes, remove_volumes)
|
||||
else:
|
||||
msg = _("Group '%s' not found.") % id
|
||||
raise exc.HTTPNotFound(explanation=msg)
|
||||
self.group_api.update(context, group, name, description,
|
||||
add_volumes, remove_volumes)
|
||||
|
||||
def update(self, req, id, body):
|
||||
"""Update the consistency group.
|
||||
|
@ -26,7 +26,6 @@ from webob import exc
|
||||
from cinder.api import common
|
||||
from cinder.api.openstack import wsgi
|
||||
from cinder.api.v2.views import volumes as volume_views
|
||||
from cinder import consistencygroup as consistencygroupAPI
|
||||
from cinder import exception
|
||||
from cinder import group as group_api
|
||||
from cinder.i18n import _
|
||||
@ -48,7 +47,6 @@ class VolumeController(wsgi.Controller):
|
||||
|
||||
def __init__(self, ext_mgr):
|
||||
self.volume_api = cinder_volume.API()
|
||||
self.consistencygroup_api = consistencygroupAPI.API()
|
||||
self.group_api = group_api.API()
|
||||
self.ext_mgr = ext_mgr
|
||||
super(VolumeController, self).__init__()
|
||||
@ -237,18 +235,12 @@ class VolumeController(wsgi.Controller):
|
||||
else:
|
||||
kwargs['source_replica'] = None
|
||||
|
||||
kwargs['group'] = None
|
||||
kwargs['consistencygroup'] = None
|
||||
consistencygroup_id = volume.get('consistencygroup_id')
|
||||
if consistencygroup_id is not None:
|
||||
try:
|
||||
kwargs['consistencygroup'] = (
|
||||
self.consistencygroup_api.get(context,
|
||||
consistencygroup_id))
|
||||
except exception.ConsistencyGroupNotFound:
|
||||
# Not found exception will be handled at the wsgi level
|
||||
kwargs['group'] = self.group_api.get(
|
||||
context, consistencygroup_id)
|
||||
else:
|
||||
kwargs['consistencygroup'] = None
|
||||
# Not found exception will be handled at the wsgi level
|
||||
kwargs['group'] = self.group_api.get(context, consistencygroup_id)
|
||||
|
||||
size = volume.get('size', None)
|
||||
if size is None and kwargs['snapshot'] is not None:
|
||||
|
@ -23,7 +23,6 @@ from cinder.api import common
|
||||
from cinder.api.openstack import wsgi
|
||||
from cinder.api.v2 import volumes as volumes_v2
|
||||
from cinder.api.v3.views import volumes as volume_views_v3
|
||||
from cinder import exception
|
||||
from cinder import group as group_api
|
||||
from cinder.i18n import _
|
||||
from cinder import objects
|
||||
@ -235,26 +234,18 @@ class VolumeController(volumes_v2.VolumeController):
|
||||
else:
|
||||
kwargs['source_replica'] = None
|
||||
|
||||
kwargs['group'] = None
|
||||
kwargs['consistencygroup'] = None
|
||||
consistencygroup_id = volume.get('consistencygroup_id')
|
||||
if consistencygroup_id is not None:
|
||||
try:
|
||||
kwargs['consistencygroup'] = (
|
||||
self.consistencygroup_api.get(context,
|
||||
consistencygroup_id))
|
||||
except exception.ConsistencyGroupNotFound:
|
||||
# Not found exception will be handled at the wsgi level
|
||||
kwargs['group'] = self.group_api.get(
|
||||
context, consistencygroup_id)
|
||||
else:
|
||||
kwargs['consistencygroup'] = None
|
||||
# Not found exception will be handled at the wsgi level
|
||||
kwargs['group'] = self.group_api.get(context, consistencygroup_id)
|
||||
|
||||
# Get group_id if volume is in a group.
|
||||
group_id = volume.get('group_id')
|
||||
if group_id is not None:
|
||||
try:
|
||||
kwargs['group'] = self.group_api.get(context, group_id)
|
||||
except exception.GroupNotFound as error:
|
||||
raise exc.HTTPNotFound(explanation=error.msg)
|
||||
# Not found exception will be handled at the wsgi level
|
||||
kwargs['group'] = self.group_api.get(context, group_id)
|
||||
|
||||
size = volume.get('size', None)
|
||||
if size is None and kwargs['snapshot'] is not None:
|
||||
|
@ -1,27 +0,0 @@
|
||||
# Copyright (C) 2012 - 2014 EMC Corporation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# Importing full names to not pollute the namespace and cause possible
|
||||
# collisions with use of 'from cinder.transfer import <foo>' elsewhere.
|
||||
|
||||
from oslo_utils import importutils
|
||||
|
||||
from cinder.common import config
|
||||
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
API = importutils.import_class(
|
||||
CONF.consistencygroup_api_class)
|
@ -17,57 +17,7 @@
|
||||
Handles all requests relating to consistency groups.
|
||||
"""
|
||||
|
||||
|
||||
import functools
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import excutils
|
||||
from oslo_utils import timeutils
|
||||
|
||||
from cinder import db
|
||||
from cinder.db import base
|
||||
from cinder import exception
|
||||
from cinder.i18n import _
|
||||
from cinder import objects
|
||||
from cinder.objects import fields as c_fields
|
||||
import cinder.policy
|
||||
from cinder import quota
|
||||
from cinder import quota_utils
|
||||
from cinder.scheduler import rpcapi as scheduler_rpcapi
|
||||
from cinder.volume import api as volume_api
|
||||
from cinder.volume import rpcapi as volume_rpcapi
|
||||
from cinder.volume import utils as vol_utils
|
||||
from cinder.volume import volume_types
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
CGQUOTAS = quota.CGQUOTAS
|
||||
QUOTAS = quota.QUOTAS
|
||||
VALID_REMOVE_VOL_FROM_CG_STATUS = (
|
||||
'available',
|
||||
'in-use',
|
||||
'error',
|
||||
'error_deleting')
|
||||
VALID_ADD_VOL_TO_CG_STATUS = (
|
||||
'available',
|
||||
'in-use')
|
||||
|
||||
|
||||
def wrap_check_policy(func):
|
||||
"""Check policy corresponding to the wrapped methods prior to execution.
|
||||
|
||||
This decorator requires the first 3 args of the wrapped function
|
||||
to be (self, context, consistencygroup)
|
||||
"""
|
||||
@functools.wraps(func)
|
||||
def wrapped(self, context, target_obj, *args, **kwargs):
|
||||
check_policy(context, func.__name__, target_obj)
|
||||
return func(self, context, target_obj, *args, **kwargs)
|
||||
|
||||
return wrapped
|
||||
|
||||
|
||||
def check_policy(context, action, target_obj=None):
|
||||
@ -79,743 +29,3 @@ def check_policy(context, action, target_obj=None):
|
||||
target.update(target_obj)
|
||||
_action = 'consistencygroup:%s' % action
|
||||
cinder.policy.enforce(context, _action, target)
|
||||
|
||||
|
||||
class API(base.Base):
|
||||
"""API for interacting with the volume manager for consistency groups."""
|
||||
|
||||
def __init__(self, db_driver=None):
|
||||
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
|
||||
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
|
||||
self.availability_zone_names = ()
|
||||
self.volume_api = volume_api.API()
|
||||
|
||||
super(API, self).__init__(db_driver)
|
||||
|
||||
def _valid_availability_zone(self, availability_zone):
|
||||
if availability_zone in self.availability_zone_names:
|
||||
return True
|
||||
if CONF.storage_availability_zone == availability_zone:
|
||||
return True
|
||||
azs = self.volume_api.list_availability_zones()
|
||||
self.availability_zone_names = [az['name'] for az in azs]
|
||||
return availability_zone in self.availability_zone_names
|
||||
|
||||
def _extract_availability_zone(self, availability_zone):
|
||||
if availability_zone is None:
|
||||
if CONF.default_availability_zone:
|
||||
availability_zone = CONF.default_availability_zone
|
||||
else:
|
||||
# For backwards compatibility use the storage_availability_zone
|
||||
availability_zone = CONF.storage_availability_zone
|
||||
|
||||
valid = self._valid_availability_zone(availability_zone)
|
||||
if not valid:
|
||||
msg = _("Availability zone '%s' is invalid.") % availability_zone
|
||||
LOG.warning(msg)
|
||||
raise exception.InvalidInput(reason=msg)
|
||||
|
||||
return availability_zone
|
||||
|
||||
def create(self, context, name, description,
|
||||
cg_volume_types, availability_zone=None):
|
||||
check_policy(context, 'create')
|
||||
|
||||
volume_type_list = cg_volume_types.split(',')
|
||||
|
||||
# NOTE: Admin context is required to get extra_specs of volume_types.
|
||||
req_volume_types = (self.db.volume_types_get_by_name_or_id(
|
||||
context.elevated(), volume_type_list))
|
||||
|
||||
req_volume_type_ids = ""
|
||||
for voltype in req_volume_types:
|
||||
req_volume_type_ids = (
|
||||
req_volume_type_ids + voltype.get('id') + ",")
|
||||
if len(req_volume_type_ids) == 0:
|
||||
req_volume_type_ids = None
|
||||
|
||||
availability_zone = self._extract_availability_zone(availability_zone)
|
||||
kwargs = {'user_id': context.user_id,
|
||||
'project_id': context.project_id,
|
||||
'availability_zone': availability_zone,
|
||||
'status': c_fields.ConsistencyGroupStatus.CREATING,
|
||||
'name': name,
|
||||
'description': description,
|
||||
'volume_type_id': req_volume_type_ids}
|
||||
group = None
|
||||
try:
|
||||
group = objects.ConsistencyGroup(context=context, **kwargs)
|
||||
group.create()
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error("Error occurred when creating consistency group "
|
||||
"%s.", name)
|
||||
|
||||
request_spec_list = []
|
||||
filter_properties_list = []
|
||||
for req_volume_type in req_volume_types:
|
||||
request_spec = {'volume_type': req_volume_type.copy(),
|
||||
'consistencygroup_id': group.id}
|
||||
filter_properties = {}
|
||||
request_spec_list.append(request_spec)
|
||||
filter_properties_list.append(filter_properties)
|
||||
|
||||
# Update quota for consistencygroups
|
||||
self.update_quota(context, group, 1)
|
||||
|
||||
self._cast_create_consistencygroup(context, group,
|
||||
request_spec_list,
|
||||
filter_properties_list)
|
||||
|
||||
return group
|
||||
|
||||
def create_from_src(self, context, name, description=None,
|
||||
cgsnapshot_id=None, source_cgid=None):
|
||||
check_policy(context, 'create')
|
||||
|
||||
kwargs = {
|
||||
'user_id': context.user_id,
|
||||
'project_id': context.project_id,
|
||||
'status': c_fields.ConsistencyGroupStatus.CREATING,
|
||||
'name': name,
|
||||
'description': description,
|
||||
'cgsnapshot_id': cgsnapshot_id,
|
||||
'source_cgid': source_cgid,
|
||||
}
|
||||
|
||||
group = None
|
||||
try:
|
||||
group = objects.ConsistencyGroup(context=context, **kwargs)
|
||||
group.create(cg_snap_id=cgsnapshot_id, cg_id=source_cgid)
|
||||
except exception.ConsistencyGroupNotFound:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error("Source CG %(source_cg)s not found when "
|
||||
"creating consistency group %(cg)s from "
|
||||
"source.",
|
||||
{'cg': name, 'source_cg': source_cgid})
|
||||
except exception.CgSnapshotNotFound:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error("CG snapshot %(cgsnap)s not found when creating "
|
||||
"consistency group %(cg)s from source.",
|
||||
{'cg': name, 'cgsnap': cgsnapshot_id})
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error("Error occurred when creating consistency group"
|
||||
" %(cg)s from cgsnapshot %(cgsnap)s.",
|
||||
{'cg': name, 'cgsnap': cgsnapshot_id})
|
||||
|
||||
# Update quota for consistencygroups
|
||||
self.update_quota(context, group, 1)
|
||||
|
||||
if not group.host:
|
||||
msg = _("No host to create consistency group %s.") % group.id
|
||||
LOG.error(msg)
|
||||
raise exception.InvalidConsistencyGroup(reason=msg)
|
||||
|
||||
group.assert_not_frozen()
|
||||
|
||||
if cgsnapshot_id:
|
||||
self._create_cg_from_cgsnapshot(context, group, cgsnapshot_id)
|
||||
elif source_cgid:
|
||||
self._create_cg_from_source_cg(context, group, source_cgid)
|
||||
|
||||
return group
|
||||
|
||||
def _create_cg_from_cgsnapshot(self, context, group, cgsnapshot_id):
|
||||
try:
|
||||
cgsnapshot = objects.CGSnapshot.get_by_id(context, cgsnapshot_id)
|
||||
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
|
||||
context, cgsnapshot.id)
|
||||
|
||||
if not snapshots:
|
||||
msg = _("Cgsnahost is empty. No consistency group "
|
||||
"will be created.")
|
||||
raise exception.InvalidConsistencyGroup(reason=msg)
|
||||
|
||||
try:
|
||||
values = {'volumes': len(snapshots)}
|
||||
QUOTAS.limit_check(context, project_id=context.project_id,
|
||||
**values)
|
||||
except exception.OverQuota as e:
|
||||
group.destroy()
|
||||
quotas = e.kwargs['quotas']
|
||||
raise exception.VolumeLimitExceeded(
|
||||
allowed=e.kwargs['overs'], limit=quotas['volumes'])
|
||||
|
||||
for snapshot in snapshots:
|
||||
kwargs = {}
|
||||
kwargs['availability_zone'] = group.availability_zone
|
||||
kwargs['cgsnapshot'] = cgsnapshot
|
||||
kwargs['consistencygroup'] = group
|
||||
kwargs['snapshot'] = snapshot
|
||||
volume_type_id = snapshot.volume_type_id
|
||||
if volume_type_id:
|
||||
kwargs['volume_type'] = (
|
||||
objects.VolumeType.get_by_name_or_id(
|
||||
context, volume_type_id))
|
||||
|
||||
# Since cgsnapshot is passed in, the following call will
|
||||
# create a db entry for the volume, but will not call the
|
||||
# volume manager to create a real volume in the backend yet.
|
||||
# If error happens, taskflow will handle rollback of quota
|
||||
# and removal of volume entry in the db.
|
||||
try:
|
||||
self.volume_api.create(context,
|
||||
snapshot.volume_size,
|
||||
None,
|
||||
None,
|
||||
**kwargs)
|
||||
except exception.CinderException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error("Error occurred when creating volume "
|
||||
"entry from snapshot in the process of "
|
||||
"creating consistency group %(group)s "
|
||||
"from cgsnapshot %(cgsnap)s.",
|
||||
{'group': group.id,
|
||||
'cgsnap': cgsnapshot.id})
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
try:
|
||||
new_vols = self.db.volume_get_all_by_group(context,
|
||||
group.id)
|
||||
for vol in new_vols:
|
||||
self.volume_api.delete(context, vol, force=True)
|
||||
group.destroy()
|
||||
finally:
|
||||
LOG.error("Error occurred when creating consistency "
|
||||
"group %(group)s from cgsnapshot "
|
||||
"%(cgsnap)s.",
|
||||
{'group': group.id,
|
||||
'cgsnap': cgsnapshot.id})
|
||||
|
||||
volumes = self.db.volume_get_all_by_group(context,
|
||||
group.id)
|
||||
for vol in volumes:
|
||||
# Update the host field for the volume.
|
||||
self.db.volume_update(context, vol['id'],
|
||||
{'host': group.get('host')})
|
||||
|
||||
self.volume_rpcapi.create_consistencygroup_from_src(
|
||||
context, group, cgsnapshot)
|
||||
|
||||
def _create_cg_from_source_cg(self, context, group, source_cgid):
|
||||
try:
|
||||
source_cg = objects.ConsistencyGroup.get_by_id(context,
|
||||
source_cgid)
|
||||
source_vols = self.db.volume_get_all_by_group(context,
|
||||
source_cg.id)
|
||||
|
||||
if not source_vols:
|
||||
msg = _("Source CG is empty. No consistency group "
|
||||
"will be created.")
|
||||
raise exception.InvalidConsistencyGroup(reason=msg)
|
||||
|
||||
try:
|
||||
values = {'volumes': len(source_vols)}
|
||||
QUOTAS.limit_check(context, project_id=context.project_id,
|
||||
**values)
|
||||
except exception.OverQuota as e:
|
||||
group.destroy()
|
||||
quotas = e.kwargs['quotas']
|
||||
raise exception.VolumeLimitExceeded(
|
||||
allowed=e.kwargs['overs'], limit=quotas['volumes'])
|
||||
|
||||
for source_vol in source_vols:
|
||||
kwargs = {}
|
||||
kwargs['availability_zone'] = group.availability_zone
|
||||
kwargs['source_cg'] = source_cg
|
||||
kwargs['consistencygroup'] = group
|
||||
kwargs['source_volume'] = source_vol
|
||||
volume_type_id = source_vol.get('volume_type_id')
|
||||
if volume_type_id:
|
||||
kwargs['volume_type'] = (
|
||||
objects.VolumeType.get_by_name_or_id(
|
||||
context, volume_type_id))
|
||||
|
||||
# Since source_cg is passed in, the following call will
|
||||
# create a db entry for the volume, but will not call the
|
||||
# volume manager to create a real volume in the backend yet.
|
||||
# If error happens, taskflow will handle rollback of quota
|
||||
# and removal of volume entry in the db.
|
||||
try:
|
||||
self.volume_api.create(context,
|
||||
source_vol['size'],
|
||||
None,
|
||||
None,
|
||||
**kwargs)
|
||||
except exception.CinderException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error("Error occurred when creating cloned "
|
||||
"volume in the process of creating "
|
||||
"consistency group %(group)s from "
|
||||
"source CG %(source_cg)s.",
|
||||
{'group': group.id,
|
||||
'source_cg': source_cg.id})
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
try:
|
||||
new_vols = self.db.volume_get_all_by_group(context,
|
||||
group.id)
|
||||
for vol in new_vols:
|
||||
self.volume_api.delete(context, vol, force=True)
|
||||
group.destroy()
|
||||
finally:
|
||||
LOG.error("Error occurred when creating consistency "
|
||||
"group %(group)s from source CG "
|
||||
"%(source_cg)s.",
|
||||
{'group': group.id,
|
||||
'source_cg': source_cg.id})
|
||||
|
||||
volumes = self.db.volume_get_all_by_group(context,
|
||||
group.id)
|
||||
for vol in volumes:
|
||||
# Update the host field for the volume.
|
||||
self.db.volume_update(context, vol['id'],
|
||||
{'host': group.host})
|
||||
|
||||
self.volume_rpcapi.create_consistencygroup_from_src(context, group,
|
||||
None, source_cg)
|
||||
|
||||
def _cast_create_consistencygroup(self, context, group,
|
||||
request_spec_list,
|
||||
filter_properties_list):
|
||||
|
||||
try:
|
||||
for request_spec in request_spec_list:
|
||||
volume_type = request_spec.get('volume_type', None)
|
||||
volume_type_id = None
|
||||
if volume_type:
|
||||
volume_type_id = volume_type.get('id', None)
|
||||
|
||||
specs = {}
|
||||
if volume_type_id:
|
||||
qos_specs = volume_types.get_volume_type_qos_specs(
|
||||
volume_type_id)
|
||||
specs = qos_specs['qos_specs']
|
||||
if not specs:
|
||||
# to make sure we don't pass empty dict
|
||||
specs = None
|
||||
|
||||
volume_properties = {
|
||||
'size': 0, # Need to populate size for the scheduler
|
||||
'user_id': context.user_id,
|
||||
'project_id': context.project_id,
|
||||
'status': 'creating',
|
||||
'attach_status': c_fields.VolumeAttachStatus.DETACHED,
|
||||
'encryption_key_id': request_spec.get('encryption_key_id',
|
||||
None),
|
||||
'display_description': request_spec.get('description',
|
||||
None),
|
||||
'display_name': request_spec.get('name', None),
|
||||
'volume_type_id': volume_type_id,
|
||||
}
|
||||
|
||||
request_spec['volume_properties'] = volume_properties
|
||||
request_spec['qos_specs'] = specs
|
||||
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
try:
|
||||
group.destroy()
|
||||
finally:
|
||||
LOG.error("Error occurred when building "
|
||||
"request spec list for consistency group "
|
||||
"%s.", group.id)
|
||||
|
||||
# Cast to the scheduler and let it handle whatever is needed
|
||||
# to select the target host for this group.
|
||||
self.scheduler_rpcapi.create_consistencygroup(
|
||||
context,
|
||||
group,
|
||||
request_spec_list=request_spec_list,
|
||||
filter_properties_list=filter_properties_list)
|
||||
|
||||
def update_quota(self, context, group, num, project_id=None):
|
||||
reserve_opts = {'consistencygroups': num}
|
||||
try:
|
||||
reservations = CGQUOTAS.reserve(context,
|
||||
project_id=project_id,
|
||||
**reserve_opts)
|
||||
if reservations:
|
||||
CGQUOTAS.commit(context, reservations)
|
||||
except Exception as e:
|
||||
with excutils.save_and_reraise_exception():
|
||||
try:
|
||||
group.destroy()
|
||||
if isinstance(e, exception.OverQuota):
|
||||
quota_utils.process_reserve_over_quota(
|
||||
context, e, resource='groups')
|
||||
finally:
|
||||
LOG.error("Failed to update quota for "
|
||||
"consistency group %s.", group.id)
|
||||
|
||||
    @wrap_check_policy
    def delete(self, context, group, force=False):
        """Delete a consistency group.

        A group that was never scheduled to a host exists only in the
        database, so it is removed directly and its quota usage returned.
        Otherwise deletion is gated by a conditional update that flips the
        status to DELETING only while the group is in a deletable state,
        and the actual delete is cast to the volume service.

        :param force: when True, skip the status check; the group is still
                      refused if it has attached volumes / snapshots or is
                      the source of an ongoing CG/CGsnapshot creation.
        :raises InvalidConsistencyGroup: if the group cannot be deleted.
        """
        if not group.host:
            self.update_quota(context, group, -1, group.project_id)

            LOG.debug("No host for consistency group %s. Deleting from "
                      "the database.", group.id)
            group.destroy()

            return

        group.assert_not_frozen()

        if force:
            expected = {}
        else:
            expected = {'status': (c_fields.ConsistencyGroupStatus.AVAILABLE,
                                   c_fields.ConsistencyGroupStatus.ERROR)}
        # All three conditions must hold: no cgsnapshots, no (attached)
        # volumes, and the group is not the source of an ongoing creation.
        filters = [~db.cg_has_cgsnapshot_filter(),
                   ~db.cg_has_volumes_filter(attached_or_with_snapshots=force),
                   ~db.cg_creating_from_src(cg_id=group.id)]
        values = {'status': c_fields.ConsistencyGroupStatus.DELETING}

        # The conditional update doubles as the API-level lock against
        # concurrent deletes/updates of the same group.
        if not group.conditional_update(values, expected, filters):
            if force:
                reason = _('Consistency group must not have attached volumes, '
                           'volumes with snapshots, or dependent cgsnapshots')
            else:
                reason = _('Consistency group status must be available or '
                           'error and must not have volumes or dependent '
                           'cgsnapshots')
            msg = (_('Cannot delete consistency group %(id)s. %(reason)s, and '
                     'it cannot be the source for an ongoing CG or CG '
                     'Snapshot creation.')
                   % {'id': group.id, 'reason': reason})
            raise exception.InvalidConsistencyGroup(reason=msg)
        self.volume_rpcapi.delete_consistencygroup(context, group)
|
||||
|
||||
def _check_update(self, group, name, description, add_volumes,
|
||||
remove_volumes, allow_empty=False):
|
||||
if allow_empty:
|
||||
if (name is None and description is None
|
||||
and not add_volumes and not remove_volumes):
|
||||
msg = (_("Cannot update consistency group %(group_id)s "
|
||||
"because no valid name, description, add_volumes, "
|
||||
"or remove_volumes were provided.") %
|
||||
{'group_id': group.id})
|
||||
raise exception.InvalidConsistencyGroup(reason=msg)
|
||||
else:
|
||||
if not (name or description or add_volumes or remove_volumes):
|
||||
msg = (_("Cannot update consistency group %(group_id)s "
|
||||
"because no valid name, description, add_volumes, "
|
||||
"or remove_volumes were provided.") %
|
||||
{'group_id': group.id})
|
||||
raise exception.InvalidConsistencyGroup(reason=msg)
|
||||
|
||||
    def update(self, context, group, name, description,
               add_volumes, remove_volumes, allow_empty=False):
        """Update consistency group.

        ``add_volumes`` / ``remove_volumes`` are comma separated strings of
        volume UUIDs.  Name and description changes are written directly to
        the database; membership changes are validated here and then cast
        to the volume service.

        :param allow_empty: when True, empty strings are accepted as new
            name/description values (only ``None`` means "not provided").
        :raises InvalidConsistencyGroup: if nothing to update or the group
            is not in a state that allows updating.
        :raises InvalidVolume: if the add/remove lists are inconsistent.
        """
        add_volumes_list = []
        remove_volumes_list = []
        if add_volumes:
            add_volumes = add_volumes.strip(',')
            add_volumes_list = add_volumes.split(',')
        if remove_volumes:
            remove_volumes = remove_volumes.strip(',')
            remove_volumes_list = remove_volumes.split(',')

        # A volume may not appear in both the add and the remove list.
        invalid_uuids = []
        for uuid in add_volumes_list:
            if uuid in remove_volumes_list:
                invalid_uuids.append(uuid)
        if invalid_uuids:
            msg = _("UUIDs %s are in both add and remove volume "
                    "list.") % invalid_uuids
            raise exception.InvalidVolume(reason=msg)

        # Validate name: an unchanged value is treated as "not provided".
        if name == group.name:
            name = None

        # Validate description.
        if description == group.description:
            description = None
        self._check_update(group, name, description, add_volumes,
                           remove_volumes, allow_empty)

        fields = {'updated_at': timeutils.utcnow()}

        # Update name and description in db now. No need to
        # send them over through an RPC call.
        if allow_empty:
            if name is not None:
                fields['name'] = name
            if description is not None:
                fields['description'] = description
        else:
            if name:
                fields['name'] = name
            if description:
                fields['description'] = description

        # NOTE(geguileo): We will use the updating status in the CG as a lock
        # mechanism to prevent volume add/remove races with other API, while we
        # figure out if we really need to add or remove volumes.
        if add_volumes or remove_volumes:
            fields['status'] = c_fields.ConsistencyGroupStatus.UPDATING

            # We cannot modify the members of this CG if the CG is being used
            # to create another CG or a CGsnapshot is being created
            filters = [~db.cg_creating_from_src(cg_id=group.id),
                       ~db.cgsnapshot_creating_from_src()]
        else:
            filters = []

        expected = {'status': c_fields.ConsistencyGroupStatus.AVAILABLE}
        if not group.conditional_update(fields, expected, filters):
            msg = _("Cannot update consistency group %s, status must be "
                    "available, and it cannot be the source for an ongoing "
                    "CG or CG Snapshot creation.") % group.id
            raise exception.InvalidConsistencyGroup(reason=msg)

        # Now the CG is "locked" for updating
        try:
            # Validate volumes in add_volumes and remove_volumes.
            add_volumes_new = self._validate_add_volumes(
                context, group.volumes, add_volumes_list, group)
            remove_volumes_new = self._validate_remove_volumes(
                group.volumes, remove_volumes_list, group)

            self._check_update(group, name, description, add_volumes_new,
                               remove_volumes_new, allow_empty)
        except Exception:
            # If we have an error on the volume_lists we must return status to
            # available as we were doing before removing API races
            with excutils.save_and_reraise_exception():
                group.status = c_fields.ConsistencyGroupStatus.AVAILABLE
                group.save()

        # Do an RPC call only if the update request includes
        # adding/removing volumes. add_volumes_new and remove_volumes_new
        # are strings of volume UUIDs separated by commas with no spaces
        # in between.
        if add_volumes_new or remove_volumes_new:
            self.volume_rpcapi.update_consistencygroup(
                context, group,
                add_volumes=add_volumes_new,
                remove_volumes=remove_volumes_new)
        # If there are no new volumes to add or remove and we had changed
        # the status to updating, turn it back to available
        elif group.status == c_fields.ConsistencyGroupStatus.UPDATING:
            group.status = c_fields.ConsistencyGroupStatus.AVAILABLE
            group.save()
|
||||
|
||||
def _validate_remove_volumes(self, volumes, remove_volumes_list, group):
|
||||
# Validate volumes in remove_volumes.
|
||||
if not remove_volumes_list:
|
||||
return None
|
||||
remove_volumes_new = ""
|
||||
for volume in volumes:
|
||||
if volume['id'] in remove_volumes_list:
|
||||
if volume['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
|
||||
msg = (_("Cannot remove volume %(volume_id)s from "
|
||||
"consistency group %(group_id)s because volume "
|
||||
"is in an invalid state: %(status)s. Valid "
|
||||
"states are: %(valid)s.") %
|
||||
{'volume_id': volume['id'],
|
||||
'group_id': group.id,
|
||||
'status': volume['status'],
|
||||
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
# Volume currently in CG. It will be removed from CG.
|
||||
if remove_volumes_new:
|
||||
remove_volumes_new += ","
|
||||
remove_volumes_new += volume['id']
|
||||
|
||||
for rem_vol in remove_volumes_list:
|
||||
if rem_vol not in remove_volumes_new:
|
||||
msg = (_("Cannot remove volume %(volume_id)s from "
|
||||
"consistency group %(group_id)s because it "
|
||||
"is not in the group.") %
|
||||
{'volume_id': rem_vol,
|
||||
'group_id': group.id})
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
|
||||
return remove_volumes_new
|
||||
|
||||
def _validate_add_volumes(self, context, volumes, add_volumes_list, group):
|
||||
if not add_volumes_list:
|
||||
return None
|
||||
add_volumes_new = ""
|
||||
for volume in volumes:
|
||||
if volume['id'] in add_volumes_list:
|
||||
# Volume already in CG. Remove from add_volumes.
|
||||
add_volumes_list.remove(volume['id'])
|
||||
|
||||
for add_vol in add_volumes_list:
|
||||
try:
|
||||
add_vol_ref = self.db.volume_get(context, add_vol)
|
||||
except exception.VolumeNotFound:
|
||||
msg = (_("Cannot add volume %(volume_id)s to consistency "
|
||||
"group %(group_id)s because volume cannot be "
|
||||
"found.") %
|
||||
{'volume_id': add_vol,
|
||||
'group_id': group.id})
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
orig_group = add_vol_ref.get('consistencygroup_id', None)
|
||||
if orig_group:
|
||||
# If volume to be added is already in the group to be updated,
|
||||
# it should have been removed from the add_volumes_list in the
|
||||
# beginning of this function. If we are here, it means it is
|
||||
# in a different group.
|
||||
msg = (_("Cannot add volume %(volume_id)s to consistency "
|
||||
"group %(group_id)s because it is already in "
|
||||
"consistency group %(orig_group)s.") %
|
||||
{'volume_id': add_vol_ref['id'],
|
||||
'group_id': group.id,
|
||||
'orig_group': orig_group})
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
if add_vol_ref:
|
||||
add_vol_type_id = add_vol_ref.get('volume_type_id', None)
|
||||
if not add_vol_type_id:
|
||||
msg = (_("Cannot add volume %(volume_id)s to consistency "
|
||||
"group %(group_id)s because it has no volume "
|
||||
"type.") %
|
||||
{'volume_id': add_vol_ref['id'],
|
||||
'group_id': group.id})
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
if add_vol_type_id not in group.volume_type_id:
|
||||
msg = (_("Cannot add volume %(volume_id)s to consistency "
|
||||
"group %(group_id)s because volume type "
|
||||
"%(volume_type)s is not supported by the "
|
||||
"group.") %
|
||||
{'volume_id': add_vol_ref['id'],
|
||||
'group_id': group.id,
|
||||
'volume_type': add_vol_type_id})
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
if (add_vol_ref['status'] not in
|
||||
VALID_ADD_VOL_TO_CG_STATUS):
|
||||
msg = (_("Cannot add volume %(volume_id)s to consistency "
|
||||
"group %(group_id)s because volume is in an "
|
||||
"invalid state: %(status)s. Valid states are: "
|
||||
"%(valid)s.") %
|
||||
{'volume_id': add_vol_ref['id'],
|
||||
'group_id': group.id,
|
||||
'status': add_vol_ref['status'],
|
||||
'valid': VALID_ADD_VOL_TO_CG_STATUS})
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
|
||||
# group.host and add_vol_ref['host'] are in this format:
|
||||
# 'host@backend#pool'. Extract host (host@backend) before
|
||||
# doing comparison.
|
||||
vol_host = vol_utils.extract_host(add_vol_ref['host'])
|
||||
group_host = vol_utils.extract_host(group.host)
|
||||
if group_host != vol_host:
|
||||
raise exception.InvalidVolume(
|
||||
reason=_("Volume is not local to this node."))
|
||||
|
||||
# Volume exists. It will be added to CG.
|
||||
if add_volumes_new:
|
||||
add_volumes_new += ","
|
||||
add_volumes_new += add_vol_ref['id']
|
||||
|
||||
else:
|
||||
msg = (_("Cannot add volume %(volume_id)s to consistency "
|
||||
"group %(group_id)s because volume does not exist.") %
|
||||
{'volume_id': add_vol_ref['id'],
|
||||
'group_id': group.id})
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
|
||||
return add_volumes_new
|
||||
|
||||
def get(self, context, group_id):
|
||||
group = objects.ConsistencyGroup.get_by_id(context, group_id)
|
||||
check_policy(context, 'get', group)
|
||||
return group
|
||||
|
||||
def get_all(self, context, filters=None, marker=None, limit=None,
|
||||
offset=None, sort_keys=None, sort_dirs=None):
|
||||
check_policy(context, 'get_all')
|
||||
if filters is None:
|
||||
filters = {}
|
||||
|
||||
if filters:
|
||||
LOG.debug("Searching by: %s", filters)
|
||||
|
||||
if (context.is_admin and 'all_tenants' in filters):
|
||||
del filters['all_tenants']
|
||||
groups = objects.ConsistencyGroupList.get_all(
|
||||
context, filters=filters, marker=marker, limit=limit,
|
||||
offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
|
||||
else:
|
||||
groups = objects.ConsistencyGroupList.get_all_by_project(
|
||||
context, context.project_id, filters=filters, marker=marker,
|
||||
limit=limit, offset=offset, sort_keys=sort_keys,
|
||||
sort_dirs=sort_dirs)
|
||||
return groups
|
||||
|
||||
def create_cgsnapshot(self, context, group, name, description):
|
||||
group.assert_not_frozen()
|
||||
options = {'consistencygroup_id': group.id,
|
||||
'user_id': context.user_id,
|
||||
'project_id': context.project_id,
|
||||
'status': "creating",
|
||||
'name': name,
|
||||
'description': description}
|
||||
|
||||
cgsnapshot = None
|
||||
cgsnapshot_id = None
|
||||
try:
|
||||
cgsnapshot = objects.CGSnapshot(context, **options)
|
||||
cgsnapshot.create()
|
||||
cgsnapshot_id = cgsnapshot.id
|
||||
|
||||
snap_name = cgsnapshot.name
|
||||
snap_desc = cgsnapshot.description
|
||||
with group.obj_as_admin():
|
||||
self.volume_api.create_snapshots_in_db(
|
||||
context, group.volumes, snap_name, snap_desc,
|
||||
cgsnapshot_id)
|
||||
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
try:
|
||||
# If the cgsnapshot has been created
|
||||
if cgsnapshot.obj_attr_is_set('id'):
|
||||
cgsnapshot.destroy()
|
||||
finally:
|
||||
LOG.error("Error occurred when creating cgsnapshot"
|
||||
" %s.", cgsnapshot_id)
|
||||
|
||||
self.volume_rpcapi.create_cgsnapshot(context, cgsnapshot)
|
||||
|
||||
return cgsnapshot
|
||||
|
||||
def delete_cgsnapshot(self, context, cgsnapshot, force=False):
|
||||
cgsnapshot.assert_not_frozen()
|
||||
values = {'status': 'deleting'}
|
||||
expected = {'status': ('available', 'error')}
|
||||
filters = [~db.cg_creating_from_src(cgsnapshot_id=cgsnapshot.id)]
|
||||
res = cgsnapshot.conditional_update(values, expected, filters)
|
||||
|
||||
if not res:
|
||||
msg = _('CgSnapshot status must be available or error, and no CG '
|
||||
'can be currently using it as source for its creation.')
|
||||
raise exception.InvalidCgSnapshot(reason=msg)
|
||||
self.volume_rpcapi.delete_cgsnapshot(context.elevated(), cgsnapshot)
|
||||
|
||||
    def update_cgsnapshot(self, context, cgsnapshot, fields):
        """Apply ``fields`` to a cgsnapshot object and persist the change.

        :param fields: dict of attribute updates for the CGSnapshot.
        """
        cgsnapshot.update(fields)
        cgsnapshot.save()
|
||||
|
||||
def get_cgsnapshot(self, context, cgsnapshot_id):
|
||||
check_policy(context, 'get_cgsnapshot')
|
||||
cgsnapshots = objects.CGSnapshot.get_by_id(context, cgsnapshot_id)
|
||||
return cgsnapshots
|
||||
|
||||
def get_all_cgsnapshots(self, context, search_opts=None):
|
||||
check_policy(context, 'get_all_cgsnapshots')
|
||||
|
||||
search_opts = search_opts or {}
|
||||
|
||||
if context.is_admin and 'all_tenants' in search_opts:
|
||||
# Need to remove all_tenants to pass the filtering below.
|
||||
del search_opts['all_tenants']
|
||||
cgsnapshots = objects.CGSnapshotList.get_all(context, search_opts)
|
||||
else:
|
||||
cgsnapshots = objects.CGSnapshotList.get_all_by_project(
|
||||
context.elevated(), context.project_id, search_opts)
|
||||
return cgsnapshots
|
||||
|
@ -57,17 +57,6 @@ def volume_update_db(context, volume_id, host, cluster_name):
|
||||
return volume
|
||||
|
||||
|
||||
def group_update_db(context, group, host, cluster_name):
    """Set the host and the scheduled_at field of a consistencygroup.

    :param context: request context (unused here, kept for the common
        *_update_db signature shared with volume_update_db)
    :param group: ConsistencyGroup object to update
    :param host: host chosen by the scheduler
    :param cluster_name: cluster the host belongs to (may be None)
    :returns: A Consistencygroup with the updated fields set properly.
    """
    group.update({'host': host, 'updated_at': timeutils.utcnow(),
                  'cluster_name': cluster_name})
    group.save()
    return group
|
||||
|
||||
|
||||
def generic_group_update_db(context, group, host, cluster_name):
|
||||
"""Set the host and the scheduled_at field of a group.
|
||||
|
||||
@ -141,13 +130,6 @@ class Scheduler(object):
|
||||
"""Must override schedule method for scheduler to work."""
|
||||
raise NotImplementedError(_("Must implement schedule_create_volume"))
|
||||
|
||||
    def schedule_create_consistencygroup(self, context, group,
                                         request_spec_list,
                                         filter_properties_list):
        """Must override schedule method for scheduler to work.

        Abstract hook: concrete scheduler drivers implement the actual
        backend selection for a consistency group here.
        """
        raise NotImplementedError(_(
            "Must implement schedule_create_consistencygroup"))
|
||||
|
||||
def schedule_create_group(self, context, group,
|
||||
group_spec,
|
||||
request_spec_list,
|
||||
|
@ -62,25 +62,6 @@ class FilterScheduler(driver.Scheduler):
|
||||
filter_properties['metadata'] = vol.get('metadata')
|
||||
filter_properties['qos_specs'] = vol.get('qos_specs')
|
||||
|
||||
    def schedule_create_consistencygroup(self, context, group,
                                         request_spec_list,
                                         filter_properties_list):
        """Select a backend for the CG and cast creation to it.

        A backend must satisfy every request spec (one per volume type in
        the group); the best weighed candidate wins.

        :raises NoValidBackend: if no backend supports all specs.
        """
        weighed_backend = self._schedule_group(
            context,
            request_spec_list,
            filter_properties_list)

        if not weighed_backend:
            raise exception.NoValidBackend(reason=_("No weighed backends "
                                                    "available"))

        backend = weighed_backend.obj
        # Persist the chosen host on the group before casting so the
        # volume service knows where the group lives.
        updated_group = driver.group_update_db(context, group, backend.host,
                                               backend.cluster_name)

        self.volume_rpcapi.create_consistencygroup(context, updated_group)
|
||||
|
||||
def schedule_create_group(self, context, group,
|
||||
group_spec,
|
||||
request_spec_list,
|
||||
@ -350,98 +331,6 @@ class FilterScheduler(driver.Scheduler):
|
||||
backends, filter_properties)
|
||||
return weighed_backends
|
||||
|
||||
    def _get_weighted_candidates_group(self, context, request_spec_list,
                                       filter_properties_list=None):
        """Finds hosts that supports the consistencygroup.

        Returns a list of hosts that meet the required specs,
        ordered by their fitness.

        Each request spec (one per volume type in the group) is filtered
        and weighed independently; the running result is intersected at
        backend (host@backend) level so only backends that satisfy every
        spec survive.
        """
        elevated = context.elevated()

        weighed_backends = []
        index = 0
        for request_spec in request_spec_list:
            volume_properties = request_spec['volume_properties']
            # Since Cinder is using mixed filters from Oslo and it's own, which
            # takes 'resource_XX' and 'volume_XX' as input respectively,
            # copying 'volume_XX' to 'resource_XX' will make both filters
            # happy.
            resource_properties = volume_properties.copy()
            volume_type = request_spec.get("volume_type", None)
            resource_type = request_spec.get("volume_type", None)
            request_spec.update({'resource_properties': resource_properties})

            config_options = self._get_configuration_options()

            filter_properties = {}
            if filter_properties_list:
                filter_properties = filter_properties_list[index]
                if filter_properties is None:
                    filter_properties = {}
            self._populate_retry(filter_properties, resource_properties)

            # Add consistencygroup_support in extra_specs if it is not there.
            # Make sure it is populated in filter_properties
            # NOTE(review): assumes resource_type is a dict with an
            # 'extra_specs' key -- a spec without a volume type would raise
            # AttributeError here; verify against callers.
            if 'consistencygroup_support' not in resource_type.get(
                    'extra_specs', {}):
                resource_type['extra_specs'].update(
                    consistencygroup_support='<is> True')

            filter_properties.update({'context': context,
                                      'request_spec': request_spec,
                                      'config_options': config_options,
                                      'volume_type': volume_type,
                                      'resource_type': resource_type})

            self.populate_filter_properties(request_spec,
                                            filter_properties)

            # Find our local list of acceptable backends by filtering and
            # weighing our options. we virtually consume resources on
            # it so subsequent selections can adjust accordingly.

            # Note: remember, we are using an iterator here. So only
            # traverse this list once.
            all_backends = self.host_manager.get_all_backend_states(elevated)
            if not all_backends:
                return []

            # Filter local backends based on requirements ...
            backends = self.host_manager.get_filtered_backends(
                all_backends, filter_properties)

            if not backends:
                return []

            LOG.debug("Filtered %s", backends)

            # weighted_host = WeightedHost() ... the best
            # host for the job.
            temp_weighed_backends = self.host_manager.get_weighed_backends(
                backends,
                filter_properties)
            if not temp_weighed_backends:
                return []
            # Intersect with the candidates accumulated from previous
            # specs: a backend is only kept if it also satisfies this one.
            if index == 0:
                weighed_backends = temp_weighed_backends
            else:
                new_weighed_backends = []
                for backend1 in weighed_backends:
                    for backend2 in temp_weighed_backends:
                        # Should schedule creation of CG on backend level,
                        # not pool level.
                        if (utils.extract_host(backend1.obj.backend_id) ==
                                utils.extract_host(backend2.obj.backend_id)):
                            new_weighed_backends.append(backend1)
                weighed_backends = new_weighed_backends
                if not weighed_backends:
                    return []

            index += 1

        return weighed_backends
|
||||
|
||||
def _get_weighted_candidates_generic_group(
|
||||
self, context, group_spec, request_spec_list,
|
||||
group_filter_properties=None,
|
||||
@ -618,11 +507,8 @@ class FilterScheduler(driver.Scheduler):
|
||||
weighed_backends = self._get_weighted_candidates(context, request_spec,
|
||||
filter_properties)
|
||||
# When we get the weighed_backends, we clear those backends that don't
|
||||
# match the consistencygroup's backend.
|
||||
if request_spec.get('CG_backend'):
|
||||
group_backend = request_spec.get('CG_backend')
|
||||
else:
|
||||
group_backend = request_spec.get('group_backend')
|
||||
# match the group's backend.
|
||||
group_backend = request_spec.get('group_backend')
|
||||
if weighed_backends and group_backend:
|
||||
# Get host name including host@backend#pool info from
|
||||
# weighed_backends.
|
||||
@ -637,17 +523,6 @@ class FilterScheduler(driver.Scheduler):
|
||||
return None
|
||||
return self._choose_top_backend(weighed_backends, request_spec)
|
||||
|
||||
def _schedule_group(self, context, request_spec_list,
|
||||
filter_properties_list=None):
|
||||
weighed_backends = self._get_weighted_candidates_group(
|
||||
context,
|
||||
request_spec_list,
|
||||
filter_properties_list)
|
||||
if not weighed_backends:
|
||||
return None
|
||||
return self._choose_top_backend_group(weighed_backends,
|
||||
request_spec_list)
|
||||
|
||||
def _schedule_generic_group(self, context, group_spec, request_spec_list,
|
||||
group_filter_properties=None,
|
||||
filter_properties_list=None):
|
||||
@ -669,12 +544,6 @@ class FilterScheduler(driver.Scheduler):
|
||||
backend_state.consume_from_volume(volume_properties)
|
||||
return top_backend
|
||||
|
||||
def _choose_top_backend_group(self, weighed_backends, request_spec_list):
|
||||
top_backend = weighed_backends[0]
|
||||
backend_state = top_backend.obj
|
||||
LOG.debug("Choosing %s", backend_state.backend_id)
|
||||
return top_backend
|
||||
|
||||
def _choose_top_backend_generic_group(self, weighed_backends):
|
||||
top_backend = weighed_backends[0]
|
||||
backend_state = top_backend.obj
|
||||
|
@ -132,28 +132,6 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
|
||||
while self._startup_delay and not self.driver.is_ready():
|
||||
eventlet.sleep(1)
|
||||
|
||||
    def create_consistencygroup(self, context, group, request_spec_list=None,
                                filter_properties_list=None):
        """RPC entry point: schedule creation of a consistency group.

        Waits until the scheduler driver is ready, then delegates backend
        selection to it.  NoValidBackend is deliberately swallowed (the
        user sees the group in 'error' state); any other failure also
        flips the group to 'error' but is re-raised.
        """
        self._wait_for_scheduler()
        try:
            self.driver.schedule_create_consistencygroup(
                context, group,
                request_spec_list,
                filter_properties_list)
        except exception.NoValidBackend:
            LOG.error("Could not find a backend for consistency group "
                      "%(group_id)s.",
                      {'group_id': group.id})
            group.status = 'error'
            group.save()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception("Failed to create consistency group "
                              "%(group_id)s.",
                              {'group_id': group.id})
                group.status = 'error'
                group.save()
|
||||
|
||||
def create_group(self, context, group, group_spec=None,
|
||||
group_filter_properties=None, request_spec_list=None,
|
||||
filter_properties_list=None):
|
||||
|
@ -66,25 +66,14 @@ class SchedulerAPI(rpc.RPCAPI):
|
||||
capabilities.
|
||||
3.4 - Adds work_cleanup and do_cleanup methods.
|
||||
3.5 - Make notify_service_capabilities support A/A
|
||||
3.6 - Removed create_consistencygroup method
|
||||
"""
|
||||
|
||||
RPC_API_VERSION = '3.5'
|
||||
RPC_API_VERSION = '3.6'
|
||||
RPC_DEFAULT_VERSION = '3.0'
|
||||
TOPIC = constants.SCHEDULER_TOPIC
|
||||
BINARY = 'cinder-scheduler'
|
||||
|
||||
def create_consistencygroup(self, ctxt, group, request_spec_list=None,
|
||||
filter_properties_list=None):
|
||||
cctxt = self._get_cctxt()
|
||||
request_spec_p_list = [jsonutils.to_primitive(rs)
|
||||
for rs in request_spec_list]
|
||||
msg_args = {
|
||||
'group': group, 'request_spec_list': request_spec_p_list,
|
||||
'filter_properties_list': filter_properties_list,
|
||||
}
|
||||
|
||||
cctxt.cast(ctxt, 'create_consistencygroup', **msg_args)
|
||||
|
||||
def create_group(self, ctxt, group, group_spec=None,
|
||||
request_spec_list=None, group_filter_properties=None,
|
||||
filter_properties_list=None):
|
||||
|
@ -22,10 +22,10 @@ from oslo_serialization import jsonutils
|
||||
from six.moves import http_client
|
||||
import webob
|
||||
|
||||
from cinder.consistencygroup import api as consistencygroupAPI
|
||||
from cinder import context
|
||||
from cinder import db
|
||||
from cinder import exception
|
||||
from cinder.group import api as groupAPI
|
||||
from cinder import objects
|
||||
from cinder import test
|
||||
from cinder.tests.unit.api import fakes
|
||||
@ -48,12 +48,25 @@ class CgsnapshotsAPITestCase(test.TestCase):
|
||||
fake.USER_ID, fake.PROJECT_ID, auth_token=True)
|
||||
|
||||
def test_show_cgsnapshot(self):
|
||||
consistencygroup = utils.create_consistencygroup(self.context)
|
||||
vol_type = utils.create_volume_type(context.get_admin_context(),
|
||||
self, name='my_vol_type')
|
||||
consistencygroup = utils.create_group(
|
||||
self.context,
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[vol_type['id']])
|
||||
volume_id = utils.create_volume(self.context,
|
||||
consistencygroup_id=
|
||||
volume_type_id=vol_type['id'],
|
||||
group_id=
|
||||
consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_cgsnapshot(
|
||||
self.context, consistencygroup_id=consistencygroup.id)
|
||||
cgsnapshot = utils.create_group_snapshot(
|
||||
self.context, group_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,)
|
||||
snapshot_id = utils.create_snapshot(
|
||||
self.context,
|
||||
volume_type_id=vol_type['id'],
|
||||
volume_id=volume_id,
|
||||
group_snapshot_id=cgsnapshot.id)['id']
|
||||
|
||||
req = webob.Request.blank('/v2/%s/cgsnapshots/%s' % (
|
||||
fake.PROJECT_ID, cgsnapshot.id))
|
||||
req.method = 'GET'
|
||||
@ -63,16 +76,16 @@ class CgsnapshotsAPITestCase(test.TestCase):
|
||||
res_dict = jsonutils.loads(res.body)
|
||||
|
||||
self.assertEqual(http_client.OK, res.status_int)
|
||||
self.assertEqual('this is a test cgsnapshot',
|
||||
self.assertEqual('this is a test group snapshot',
|
||||
res_dict['cgsnapshot']['description'])
|
||||
|
||||
self.assertEqual('test_cgsnapshot',
|
||||
self.assertEqual('test_group_snapshot',
|
||||
res_dict['cgsnapshot']['name'])
|
||||
self.assertEqual('creating', res_dict['cgsnapshot']['status'])
|
||||
|
||||
db.snapshot_destroy(context.get_admin_context(), snapshot_id)
|
||||
cgsnapshot.destroy()
|
||||
db.volume_destroy(context.get_admin_context(),
|
||||
volume_id)
|
||||
db.volume_destroy(context.get_admin_context(), volume_id)
|
||||
consistencygroup.destroy()
|
||||
|
||||
def test_show_cgsnapshot_with_cgsnapshot_NotFound(self):
|
||||
@ -92,16 +105,25 @@ class CgsnapshotsAPITestCase(test.TestCase):
|
||||
res_dict['itemNotFound']['message'])
|
||||
|
||||
def test_list_cgsnapshots_json(self):
|
||||
consistencygroup = utils.create_consistencygroup(self.context)
|
||||
vol_type = utils.create_volume_type(context.get_admin_context(),
|
||||
self, name='my_vol_type')
|
||||
consistencygroup = utils.create_group(
|
||||
self.context,
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[vol_type['id']])
|
||||
volume_id = utils.create_volume(self.context,
|
||||
consistencygroup_id=
|
||||
volume_type_id=vol_type['id'],
|
||||
group_id=
|
||||
consistencygroup.id)['id']
|
||||
cgsnapshot1 = utils.create_cgsnapshot(
|
||||
self.context, consistencygroup_id=consistencygroup.id)
|
||||
cgsnapshot2 = utils.create_cgsnapshot(
|
||||
self.context, consistencygroup_id=consistencygroup.id)
|
||||
cgsnapshot3 = utils.create_cgsnapshot(
|
||||
self.context, consistencygroup_id=consistencygroup.id)
|
||||
cgsnapshot1 = utils.create_group_snapshot(
|
||||
self.context, group_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,)
|
||||
cgsnapshot2 = utils.create_group_snapshot(
|
||||
self.context, group_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,)
|
||||
cgsnapshot3 = utils.create_group_snapshot(
|
||||
self.context, group_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,)
|
||||
|
||||
req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID)
|
||||
req.method = 'GET'
|
||||
@ -111,37 +133,45 @@ class CgsnapshotsAPITestCase(test.TestCase):
|
||||
res_dict = jsonutils.loads(res.body)
|
||||
|
||||
self.assertEqual(http_client.OK, res.status_int)
|
||||
self.assertEqual(cgsnapshot1.id,
|
||||
self.assertEqual(cgsnapshot3.id,
|
||||
res_dict['cgsnapshots'][0]['id'])
|
||||
self.assertEqual('test_cgsnapshot',
|
||||
self.assertEqual('test_group_snapshot',
|
||||
res_dict['cgsnapshots'][0]['name'])
|
||||
self.assertEqual(cgsnapshot2.id,
|
||||
res_dict['cgsnapshots'][1]['id'])
|
||||
self.assertEqual('test_cgsnapshot',
|
||||
self.assertEqual('test_group_snapshot',
|
||||
res_dict['cgsnapshots'][1]['name'])
|
||||
self.assertEqual(cgsnapshot3.id,
|
||||
self.assertEqual(cgsnapshot1.id,
|
||||
res_dict['cgsnapshots'][2]['id'])
|
||||
self.assertEqual('test_cgsnapshot',
|
||||
self.assertEqual('test_group_snapshot',
|
||||
res_dict['cgsnapshots'][2]['name'])
|
||||
|
||||
cgsnapshot3.destroy()
|
||||
cgsnapshot2.destroy()
|
||||
cgsnapshot1.destroy()
|
||||
db.volume_destroy(context.get_admin_context(),
|
||||
volume_id)
|
||||
db.volume_destroy(context.get_admin_context(), volume_id)
|
||||
consistencygroup.destroy()
|
||||
|
||||
def test_list_cgsnapshots_detail_json(self):
|
||||
consistencygroup = utils.create_consistencygroup(self.context)
|
||||
vol_type = utils.create_volume_type(context.get_admin_context(),
|
||||
self, name='my_vol_type')
|
||||
consistencygroup = utils.create_group(
|
||||
self.context,
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[vol_type['id']])
|
||||
volume_id = utils.create_volume(self.context,
|
||||
consistencygroup_id=
|
||||
volume_type_id=vol_type['id'],
|
||||
group_id=
|
||||
consistencygroup.id)['id']
|
||||
cgsnapshot1 = utils.create_cgsnapshot(
|
||||
self.context, consistencygroup_id=consistencygroup.id)
|
||||
cgsnapshot2 = utils.create_cgsnapshot(
|
||||
self.context, consistencygroup_id=consistencygroup.id)
|
||||
cgsnapshot3 = utils.create_cgsnapshot(
|
||||
self.context, consistencygroup_id=consistencygroup.id)
|
||||
cgsnapshot1 = utils.create_group_snapshot(
|
||||
self.context, group_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,)
|
||||
cgsnapshot2 = utils.create_group_snapshot(
|
||||
self.context, group_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,)
|
||||
cgsnapshot3 = utils.create_group_snapshot(
|
||||
self.context, group_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,)
|
||||
|
||||
req = webob.Request.blank('/v2/%s/cgsnapshots/detail' %
|
||||
fake.PROJECT_ID)
|
||||
@ -153,29 +183,29 @@ class CgsnapshotsAPITestCase(test.TestCase):
|
||||
res_dict = jsonutils.loads(res.body)
|
||||
|
||||
self.assertEqual(http_client.OK, res.status_int)
|
||||
self.assertEqual('this is a test cgsnapshot',
|
||||
self.assertEqual('this is a test group snapshot',
|
||||
res_dict['cgsnapshots'][0]['description'])
|
||||
self.assertEqual('test_cgsnapshot',
|
||||
self.assertEqual('test_group_snapshot',
|
||||
res_dict['cgsnapshots'][0]['name'])
|
||||
self.assertEqual(cgsnapshot1.id,
|
||||
self.assertEqual(cgsnapshot3.id,
|
||||
res_dict['cgsnapshots'][0]['id'])
|
||||
self.assertEqual('creating',
|
||||
res_dict['cgsnapshots'][0]['status'])
|
||||
|
||||
self.assertEqual('this is a test cgsnapshot',
|
||||
self.assertEqual('this is a test group snapshot',
|
||||
res_dict['cgsnapshots'][1]['description'])
|
||||
self.assertEqual('test_cgsnapshot',
|
||||
self.assertEqual('test_group_snapshot',
|
||||
res_dict['cgsnapshots'][1]['name'])
|
||||
self.assertEqual(cgsnapshot2.id,
|
||||
res_dict['cgsnapshots'][1]['id'])
|
||||
self.assertEqual('creating',
|
||||
res_dict['cgsnapshots'][1]['status'])
|
||||
|
||||
self.assertEqual('this is a test cgsnapshot',
|
||||
self.assertEqual('this is a test group snapshot',
|
||||
res_dict['cgsnapshots'][2]['description'])
|
||||
self.assertEqual('test_cgsnapshot',
|
||||
self.assertEqual('test_group_snapshot',
|
||||
res_dict['cgsnapshots'][2]['name'])
|
||||
self.assertEqual(cgsnapshot3.id,
|
||||
self.assertEqual(cgsnapshot1.id,
|
||||
res_dict['cgsnapshots'][2]['id'])
|
||||
self.assertEqual('creating',
|
||||
res_dict['cgsnapshots'][2]['status'])
|
||||
@ -183,16 +213,22 @@ class CgsnapshotsAPITestCase(test.TestCase):
|
||||
cgsnapshot3.destroy()
|
||||
cgsnapshot2.destroy()
|
||||
cgsnapshot1.destroy()
|
||||
db.volume_destroy(context.get_admin_context(),
|
||||
volume_id)
|
||||
db.volume_destroy(context.get_admin_context(), volume_id)
|
||||
consistencygroup.destroy()
|
||||
|
||||
@mock.patch(
|
||||
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
|
||||
def test_create_cgsnapshot_json(self, mock_validate):
|
||||
consistencygroup = utils.create_consistencygroup(self.context)
|
||||
utils.create_volume(
|
||||
self.context, consistencygroup_id=consistencygroup.id)
|
||||
vol_type = utils.create_volume_type(context.get_admin_context(),
|
||||
self, name='my_vol_type')
|
||||
consistencygroup = utils.create_group(
|
||||
self.context,
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[vol_type['id']])
|
||||
volume_id = utils.create_volume(self.context,
|
||||
volume_type_id=vol_type['id'],
|
||||
group_id=
|
||||
consistencygroup.id)['id']
|
||||
|
||||
body = {"cgsnapshot": {"name": "cg1",
|
||||
"description":
|
||||
@ -211,21 +247,27 @@ class CgsnapshotsAPITestCase(test.TestCase):
|
||||
self.assertIn('id', res_dict['cgsnapshot'])
|
||||
self.assertTrue(mock_validate.called)
|
||||
|
||||
consistencygroup.destroy()
|
||||
cgsnapshot = objects.CGSnapshot.get_by_id(
|
||||
cgsnapshot = objects.GroupSnapshot.get_by_id(
|
||||
context.get_admin_context(), res_dict['cgsnapshot']['id'])
|
||||
cgsnapshot.destroy()
|
||||
db.volume_destroy(context.get_admin_context(), volume_id)
|
||||
consistencygroup.destroy()
|
||||
|
||||
@mock.patch(
|
||||
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
|
||||
def test_create_cgsnapshot_when_volume_in_error_status(self,
|
||||
mock_validate):
|
||||
consistencygroup = utils.create_consistencygroup(self.context)
|
||||
utils.create_volume(
|
||||
vol_type = utils.create_volume_type(context.get_admin_context(),
|
||||
self, name='my_vol_type')
|
||||
consistencygroup = utils.create_group(
|
||||
self.context,
|
||||
status='error',
|
||||
consistencygroup_id=consistencygroup.id
|
||||
)
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[vol_type['id']])
|
||||
volume_id = utils.create_volume(self.context,
|
||||
volume_type_id=vol_type['id'],
|
||||
group_id=consistencygroup.id,
|
||||
status='error')['id']
|
||||
|
||||
body = {"cgsnapshot": {"name": "cg1",
|
||||
"description":
|
||||
"CG Snapshot 1",
|
||||
@ -248,6 +290,7 @@ class CgsnapshotsAPITestCase(test.TestCase):
|
||||
)
|
||||
self.assertTrue(mock_validate.called)
|
||||
|
||||
db.volume_destroy(context.get_admin_context(), volume_id)
|
||||
consistencygroup.destroy()
|
||||
|
||||
def test_create_cgsnapshot_with_no_body(self):
|
||||
@ -268,13 +311,19 @@ class CgsnapshotsAPITestCase(test.TestCase):
|
||||
"request body.",
|
||||
res_dict['badRequest']['message'])
|
||||
|
||||
@mock.patch.object(consistencygroupAPI.API, 'create_cgsnapshot',
|
||||
side_effect=exception.InvalidCgSnapshot(
|
||||
reason='invalid cgsnapshot'))
|
||||
@mock.patch.object(groupAPI.API, 'create_group_snapshot',
|
||||
side_effect=exception.InvalidGroupSnapshot(
|
||||
reason='invalid group_snapshot'))
|
||||
def test_create_with_invalid_cgsnapshot(self, mock_create_cgsnapshot):
|
||||
consistencygroup = utils.create_consistencygroup(self.context)
|
||||
utils.create_volume(
|
||||
self.context, consistencygroup_id=consistencygroup.id)
|
||||
vol_type = utils.create_volume_type(context.get_admin_context(),
|
||||
self, name='my_vol_type')
|
||||
consistencygroup = utils.create_group(
|
||||
self.context,
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[vol_type['id']])
|
||||
volume_id = utils.create_volume(self.context,
|
||||
volume_type_id=vol_type['id'],
|
||||
group_id=consistencygroup.id)['id']
|
||||
|
||||
body = {"cgsnapshot": {"name": "cg1",
|
||||
"description":
|
||||
@ -291,17 +340,25 @@ class CgsnapshotsAPITestCase(test.TestCase):
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertEqual(http_client.BAD_REQUEST,
|
||||
res_dict['badRequest']['code'])
|
||||
self.assertEqual('Invalid CgSnapshot: invalid cgsnapshot',
|
||||
self.assertEqual('Invalid GroupSnapshot: invalid group_snapshot',
|
||||
res_dict['badRequest']['message'])
|
||||
|
||||
db.volume_destroy(context.get_admin_context(), volume_id)
|
||||
consistencygroup.destroy()
|
||||
|
||||
@mock.patch.object(consistencygroupAPI.API, 'create_cgsnapshot',
|
||||
side_effect=exception.CgSnapshotNotFound(
|
||||
cgsnapshot_id='invalid_id'))
|
||||
@mock.patch.object(groupAPI.API, 'create_group_snapshot',
|
||||
side_effect=exception.GroupSnapshotNotFound(
|
||||
group_snapshot_id='invalid_id'))
|
||||
def test_create_with_cgsnapshot_not_found(self, mock_create_cgsnapshot):
|
||||
consistencygroup = utils.create_consistencygroup(self.context)
|
||||
utils.create_volume(
|
||||
self.context, consistencygroup_id=consistencygroup.id)
|
||||
vol_type = utils.create_volume_type(context.get_admin_context(),
|
||||
self, name='my_vol_type')
|
||||
consistencygroup = utils.create_group(
|
||||
self.context,
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[vol_type['id']])
|
||||
volume_id = utils.create_volume(self.context,
|
||||
volume_type_id=vol_type['id'],
|
||||
group_id=consistencygroup.id)['id']
|
||||
|
||||
body = {"cgsnapshot": {"name": "cg1",
|
||||
"description":
|
||||
@ -319,12 +376,19 @@ class CgsnapshotsAPITestCase(test.TestCase):
|
||||
self.assertEqual(http_client.NOT_FOUND, res.status_int)
|
||||
self.assertEqual(http_client.NOT_FOUND,
|
||||
res_dict['itemNotFound']['code'])
|
||||
self.assertEqual('CgSnapshot invalid_id could not be found.',
|
||||
self.assertEqual('GroupSnapshot invalid_id could not be found.',
|
||||
res_dict['itemNotFound']['message'])
|
||||
|
||||
db.volume_destroy(context.get_admin_context(), volume_id)
|
||||
consistencygroup.destroy()
|
||||
|
||||
def test_create_cgsnapshot_from_empty_consistencygroup(self):
|
||||
consistencygroup = utils.create_consistencygroup(self.context)
|
||||
vol_type = utils.create_volume_type(context.get_admin_context(),
|
||||
self, name='my_vol_type')
|
||||
consistencygroup = utils.create_group(
|
||||
self.context,
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[vol_type['id']])
|
||||
|
||||
body = {"cgsnapshot": {"name": "cg1",
|
||||
"description":
|
||||
@ -342,25 +406,28 @@ class CgsnapshotsAPITestCase(test.TestCase):
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertEqual(http_client.BAD_REQUEST,
|
||||
res_dict['badRequest']['code'])
|
||||
expected = ("Invalid ConsistencyGroup: Source CG cannot be empty or "
|
||||
"in 'creating' or 'updating' state. No cgsnapshot will be "
|
||||
"created.")
|
||||
self.assertEqual(expected, res_dict['badRequest']['message'])
|
||||
self.assertIsNotNone(res_dict['badRequest']['message'])
|
||||
|
||||
# If failed to create cgsnapshot, its DB object should not be created
|
||||
self.assertListEqual(
|
||||
[],
|
||||
list(objects.CGSnapshotList.get_all(self.context)))
|
||||
list(objects.GroupSnapshotList.get_all(self.context)))
|
||||
consistencygroup.destroy()
|
||||
|
||||
def test_delete_cgsnapshot_available(self):
|
||||
consistencygroup = utils.create_consistencygroup(self.context)
|
||||
volume_id = utils.create_volume(
|
||||
vol_type = utils.create_volume_type(context.get_admin_context(),
|
||||
self, name='my_vol_type')
|
||||
consistencygroup = utils.create_group(
|
||||
self.context,
|
||||
consistencygroup_id=consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_cgsnapshot(
|
||||
self.context,
|
||||
consistencygroup_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[vol_type['id']])
|
||||
volume_id = utils.create_volume(self.context,
|
||||
volume_type_id=vol_type['id'],
|
||||
group_id=
|
||||
consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_group_snapshot(
|
||||
self.context, group_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
status='available')
|
||||
req = webob.Request.blank('/v2/%s/cgsnapshots/%s' %
|
||||
(fake.PROJECT_ID, cgsnapshot.id))
|
||||
@ -369,40 +436,48 @@ class CgsnapshotsAPITestCase(test.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app(
|
||||
fake_auth_context=self.user_ctxt))
|
||||
|
||||
cgsnapshot = objects.CGSnapshot.get_by_id(self.context, cgsnapshot.id)
|
||||
cgsnapshot = objects.GroupSnapshot.get_by_id(self.context,
|
||||
cgsnapshot.id)
|
||||
self.assertEqual(http_client.ACCEPTED, res.status_int)
|
||||
self.assertEqual('deleting', cgsnapshot.status)
|
||||
|
||||
cgsnapshot.destroy()
|
||||
db.volume_destroy(context.get_admin_context(),
|
||||
volume_id)
|
||||
db.volume_destroy(context.get_admin_context(), volume_id)
|
||||
consistencygroup.destroy()
|
||||
|
||||
def test_delete_cgsnapshot_available_used_as_source(self):
|
||||
consistencygroup = utils.create_consistencygroup(self.context)
|
||||
volume_id = utils.create_volume(
|
||||
vol_type = utils.create_volume_type(context.get_admin_context(),
|
||||
self, name='my_vol_type')
|
||||
consistencygroup = utils.create_group(
|
||||
self.context,
|
||||
consistencygroup_id=consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_cgsnapshot(
|
||||
self.context,
|
||||
consistencygroup_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[vol_type['id']])
|
||||
volume_id = utils.create_volume(self.context,
|
||||
volume_type_id=vol_type['id'],
|
||||
group_id=
|
||||
consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_group_snapshot(
|
||||
self.context, group_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
status='available')
|
||||
|
||||
cg2 = utils.create_consistencygroup(
|
||||
self.context, status='creating', cgsnapshot_id=cgsnapshot.id)
|
||||
self.context, status='creating',
|
||||
group_snapshot_id=cgsnapshot.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID)
|
||||
req = webob.Request.blank('/v2/fake/cgsnapshots/%s' %
|
||||
cgsnapshot.id)
|
||||
req.method = 'DELETE'
|
||||
req.headers['Content-Type'] = 'application/json'
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
|
||||
cgsnapshot = objects.CGSnapshot.get_by_id(self.context, cgsnapshot.id)
|
||||
cgsnapshot = objects.GroupSnapshot.get_by_id(self.context,
|
||||
cgsnapshot.id)
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertEqual('available', cgsnapshot.status)
|
||||
|
||||
cgsnapshot.destroy()
|
||||
db.volume_destroy(context.get_admin_context(),
|
||||
volume_id)
|
||||
db.volume_destroy(context.get_admin_context(), volume_id)
|
||||
consistencygroup.destroy()
|
||||
cg2.destroy()
|
||||
|
||||
@ -422,15 +497,22 @@ class CgsnapshotsAPITestCase(test.TestCase):
|
||||
fake.WILL_NOT_BE_FOUND_ID,
|
||||
res_dict['itemNotFound']['message'])
|
||||
|
||||
def test_delete_cgsnapshot_with_Invalidcgsnapshot(self):
|
||||
consistencygroup = utils.create_consistencygroup(self.context)
|
||||
volume_id = utils.create_volume(
|
||||
def test_delete_cgsnapshot_with_invalid_cgsnapshot(self):
|
||||
vol_type = utils.create_volume_type(context.get_admin_context(),
|
||||
self, name='my_vol_type')
|
||||
consistencygroup = utils.create_group(
|
||||
self.context,
|
||||
consistencygroup_id=consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_cgsnapshot(
|
||||
self.context,
|
||||
consistencygroup_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[vol_type['id']])
|
||||
volume_id = utils.create_volume(self.context,
|
||||
volume_type_id=vol_type['id'],
|
||||
group_id=
|
||||
consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_group_snapshot(
|
||||
self.context, group_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
status='invalid')
|
||||
|
||||
req = webob.Request.blank('/v2/%s/cgsnapshots/%s' % (
|
||||
fake.PROJECT_ID, cgsnapshot.id))
|
||||
req.method = 'DELETE'
|
||||
@ -442,12 +524,8 @@ class CgsnapshotsAPITestCase(test.TestCase):
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertEqual(http_client.BAD_REQUEST,
|
||||
res_dict['badRequest']['code'])
|
||||
expected = ('Invalid CgSnapshot: CgSnapshot status must be available '
|
||||
'or error, and no CG can be currently using it as source '
|
||||
'for its creation.')
|
||||
self.assertEqual(expected, res_dict['badRequest']['message'])
|
||||
self.assertIsNotNone(res_dict['badRequest']['message'])
|
||||
|
||||
cgsnapshot.destroy()
|
||||
db.volume_destroy(context.get_admin_context(),
|
||||
volume_id)
|
||||
db.volume_destroy(context.get_admin_context(), volume_id)
|
||||
consistencygroup.destroy()
|
||||
|
@ -23,10 +23,10 @@ from oslo_serialization import jsonutils
|
||||
from six.moves import http_client
|
||||
import webob
|
||||
|
||||
import cinder.consistencygroup
|
||||
from cinder import context
|
||||
from cinder import db
|
||||
from cinder import exception
|
||||
import cinder.group
|
||||
from cinder.i18n import _
|
||||
from cinder import objects
|
||||
from cinder.objects import fields
|
||||
@ -44,7 +44,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(ConsistencyGroupsAPITestCase, self).setUp()
|
||||
self.cg_api = cinder.consistencygroup.API()
|
||||
self.cg_api = cinder.group.API()
|
||||
self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
|
||||
auth_token=True,
|
||||
is_admin=True)
|
||||
@ -58,55 +58,33 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
user_id=fake.USER_ID,
|
||||
project_id=fake.PROJECT_ID,
|
||||
description='this is a test consistency group',
|
||||
volume_type_id=fake.VOLUME_TYPE_ID,
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[fake.VOLUME_TYPE_ID],
|
||||
availability_zone='az1',
|
||||
host='fakehost',
|
||||
status=fields.ConsistencyGroupStatus.CREATING,
|
||||
**kwargs):
|
||||
"""Create a consistency group object."""
|
||||
ctxt = ctxt or self.ctxt
|
||||
consistencygroup = objects.ConsistencyGroup(ctxt)
|
||||
consistencygroup = objects.Group(ctxt)
|
||||
consistencygroup.user_id = user_id
|
||||
consistencygroup.project_id = project_id
|
||||
consistencygroup.availability_zone = availability_zone
|
||||
consistencygroup.name = name
|
||||
consistencygroup.description = description
|
||||
consistencygroup.volume_type_id = volume_type_id
|
||||
consistencygroup.group_type_id = group_type_id
|
||||
consistencygroup.volume_type_ids = volume_type_ids
|
||||
consistencygroup.host = host
|
||||
consistencygroup.status = status
|
||||
consistencygroup.update(kwargs)
|
||||
consistencygroup.create()
|
||||
return consistencygroup
|
||||
|
||||
def _create_group(
|
||||
self,
|
||||
ctxt=None,
|
||||
name='test_group',
|
||||
user_id=fake.USER_ID,
|
||||
project_id=fake.PROJECT_ID,
|
||||
description='this is a test group',
|
||||
group_type_id=fake.VOLUME_TYPE_ID,
|
||||
availability_zone='az1',
|
||||
host='fakehost',
|
||||
status=fields.GroupStatus.CREATING,
|
||||
**kwargs):
|
||||
"""Create a consistency group object."""
|
||||
ctxt = ctxt or self.ctxt
|
||||
group = objects.Group(ctxt)
|
||||
group.user_id = user_id
|
||||
group.project_id = project_id
|
||||
group.availability_zone = availability_zone
|
||||
group.name = name
|
||||
group.description = description
|
||||
group.group_type_id = group_type_id
|
||||
group.host = host
|
||||
group.status = status
|
||||
group.update(kwargs)
|
||||
group.create()
|
||||
return group
|
||||
|
||||
def test_show_consistencygroup(self):
|
||||
consistencygroup = self._create_consistencygroup()
|
||||
vol_type = utils.create_volume_type(context.get_admin_context(),
|
||||
self, name='my_vol_type')
|
||||
consistencygroup = self._create_consistencygroup(
|
||||
volume_type_ids=[vol_type['id']])
|
||||
req = webob.Request.blank('/v2/%s/consistencygroups/%s' %
|
||||
(fake.PROJECT_ID, consistencygroup.id))
|
||||
req.method = 'GET'
|
||||
@ -115,6 +93,8 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
fake_auth_context=self.user_ctxt))
|
||||
res_dict = jsonutils.loads(res.body)
|
||||
|
||||
consistencygroup.destroy()
|
||||
|
||||
self.assertEqual(http_client.OK, res.status_int)
|
||||
self.assertEqual('az1',
|
||||
res_dict['consistencygroup']['availability_zone'])
|
||||
@ -124,11 +104,9 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
res_dict['consistencygroup']['name'])
|
||||
self.assertEqual('creating',
|
||||
res_dict['consistencygroup']['status'])
|
||||
self.assertEqual([fake.VOLUME_TYPE_ID],
|
||||
self.assertEqual([vol_type['id']],
|
||||
res_dict['consistencygroup']['volume_types'])
|
||||
|
||||
consistencygroup.destroy()
|
||||
|
||||
def test_show_consistencygroup_with_consistencygroup_NotFound(self):
|
||||
req = webob.Request.blank('/v2/%s/consistencygroups/%s' %
|
||||
(fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID))
|
||||
@ -331,8 +309,6 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
consistencygroup2 = self._create_consistencygroup(
|
||||
name="group", project_id=fake.PROJECT2_ID)
|
||||
|
||||
group1 = self._create_group()
|
||||
group2 = self._create_group(name="group", project_id=fake.PROJECT2_ID)
|
||||
url = ('/v2/%s/consistencygroups?'
|
||||
'all_tenants=True&project_id=%s') % (fake.PROJECT_ID,
|
||||
fake.PROJECT2_ID)
|
||||
@ -346,15 +322,11 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt))
|
||||
res_dict = jsonutils.loads(res.body)
|
||||
self.assertEqual(200, res.status_int)
|
||||
self.assertEqual(2, len(res_dict['consistencygroups']))
|
||||
self.assertEqual(1, len(res_dict['consistencygroups']))
|
||||
self.assertEqual("group",
|
||||
res_dict['consistencygroups'][0]['name'])
|
||||
self.assertEqual("group",
|
||||
res_dict['consistencygroups'][1]['name'])
|
||||
consistencygroup1.destroy()
|
||||
consistencygroup2.destroy()
|
||||
group1.destroy()
|
||||
group2.destroy()
|
||||
|
||||
@ddt.data(False, True)
|
||||
def test_list_consistencygroups_with_sort(self, is_detail):
|
||||
@ -388,11 +360,16 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
consistencygroup3.destroy()
|
||||
|
||||
def test_list_consistencygroups_detail_json(self):
|
||||
consistencygroup1 = self._create_consistencygroup()
|
||||
consistencygroup2 = self._create_consistencygroup()
|
||||
vol_type1 = utils.create_volume_type(context.get_admin_context(),
|
||||
self, name='my_vol_type1')
|
||||
vol_type2 = utils.create_volume_type(context.get_admin_context(),
|
||||
self, name='my_vol_type2')
|
||||
consistencygroup1 = self._create_consistencygroup(
|
||||
volume_type_ids=[vol_type1['id']])
|
||||
consistencygroup2 = self._create_consistencygroup(
|
||||
volume_type_ids=[vol_type1['id']])
|
||||
consistencygroup3 = self._create_consistencygroup(
|
||||
volume_type_id='%s,%s' % (fake.VOLUME_TYPE_ID,
|
||||
fake.VOLUME_TYPE2_ID))
|
||||
volume_type_ids=[vol_type1['id'], vol_type2['id']])
|
||||
req = webob.Request.blank('/v2/%s/consistencygroups/detail' %
|
||||
fake.PROJECT_ID)
|
||||
req.method = 'GET'
|
||||
@ -402,6 +379,14 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
fake_auth_context=self.user_ctxt))
|
||||
res_dict = jsonutils.loads(res.body)
|
||||
|
||||
cg_ids = [consistencygroup1.id, consistencygroup2.id,
|
||||
consistencygroup3.id]
|
||||
vol_type_ids = [vol_type1['id'], vol_type2['id']]
|
||||
|
||||
consistencygroup1.destroy()
|
||||
consistencygroup2.destroy()
|
||||
consistencygroup3.destroy()
|
||||
|
||||
self.assertEqual(http_client.OK, res.status_int)
|
||||
self.assertEqual('az1',
|
||||
res_dict['consistencygroups'][0]['availability_zone'])
|
||||
@ -409,12 +394,11 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
res_dict['consistencygroups'][0]['description'])
|
||||
self.assertEqual('test_consistencygroup',
|
||||
res_dict['consistencygroups'][0]['name'])
|
||||
self.assertEqual(consistencygroup3.id,
|
||||
res_dict['consistencygroups'][0]['id'])
|
||||
self.assertIn(res_dict['consistencygroups'][0]['id'], cg_ids)
|
||||
self.assertEqual('creating',
|
||||
res_dict['consistencygroups'][0]['status'])
|
||||
self.assertEqual([fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID],
|
||||
res_dict['consistencygroups'][0]['volume_types'])
|
||||
for vol_type_id in res_dict['consistencygroups'][0]['volume_types']:
|
||||
self.assertIn(vol_type_id, vol_type_ids)
|
||||
|
||||
self.assertEqual('az1',
|
||||
res_dict['consistencygroups'][1]['availability_zone'])
|
||||
@ -422,12 +406,11 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
res_dict['consistencygroups'][1]['description'])
|
||||
self.assertEqual('test_consistencygroup',
|
||||
res_dict['consistencygroups'][1]['name'])
|
||||
self.assertEqual(consistencygroup2.id,
|
||||
res_dict['consistencygroups'][1]['id'])
|
||||
self.assertIn(res_dict['consistencygroups'][0]['id'], cg_ids)
|
||||
self.assertEqual('creating',
|
||||
res_dict['consistencygroups'][1]['status'])
|
||||
self.assertEqual([fake.VOLUME_TYPE_ID],
|
||||
res_dict['consistencygroups'][1]['volume_types'])
|
||||
for vol_type_id in res_dict['consistencygroups'][1]['volume_types']:
|
||||
self.assertIn(vol_type_id, vol_type_ids)
|
||||
|
||||
self.assertEqual('az1',
|
||||
res_dict['consistencygroups'][2]['availability_zone'])
|
||||
@ -435,16 +418,11 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
res_dict['consistencygroups'][2]['description'])
|
||||
self.assertEqual('test_consistencygroup',
|
||||
res_dict['consistencygroups'][2]['name'])
|
||||
self.assertEqual(consistencygroup1.id,
|
||||
res_dict['consistencygroups'][2]['id'])
|
||||
self.assertIn(res_dict['consistencygroups'][0]['id'], cg_ids)
|
||||
self.assertEqual('creating',
|
||||
res_dict['consistencygroups'][2]['status'])
|
||||
self.assertEqual([fake.VOLUME_TYPE_ID],
|
||||
res_dict['consistencygroups'][2]['volume_types'])
|
||||
|
||||
consistencygroup1.destroy()
|
||||
consistencygroup2.destroy()
|
||||
consistencygroup3.destroy()
|
||||
for vol_type_id in res_dict['consistencygroups'][2]['volume_types']:
|
||||
self.assertIn(vol_type_id, vol_type_ids)
|
||||
|
||||
@mock.patch(
|
||||
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
|
||||
@ -453,11 +431,11 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
|
||||
# Create volume type
|
||||
vol_type = 'test'
|
||||
db.volume_type_create(self.ctxt,
|
||||
{'name': vol_type, 'extra_specs': {}})
|
||||
vol_type_id = db.volume_type_create(
|
||||
self.ctxt, {'name': vol_type, 'extra_specs': {}})['id']
|
||||
|
||||
body = {"consistencygroup": {"name": "cg1",
|
||||
"volume_types": vol_type,
|
||||
"volume_types": vol_type_id,
|
||||
"description":
|
||||
"Consistency Group 1", }}
|
||||
req = webob.Request.blank('/v2/%s/consistencygroups' % fake.PROJECT_ID)
|
||||
@ -473,12 +451,8 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
self.assertTrue(mock_validate.called)
|
||||
|
||||
group_id = res_dict['consistencygroup']['id']
|
||||
try:
|
||||
cg = objects.ConsistencyGroup.get_by_id(self.ctxt,
|
||||
group_id)
|
||||
except exception.ConsistencyGroupNotFound:
|
||||
cg = objects.Group.get_by_id(self.ctxt,
|
||||
group_id)
|
||||
cg = objects.Group.get_by_id(self.ctxt, group_id)
|
||||
|
||||
cg.destroy()
|
||||
|
||||
def test_create_consistencygroup_with_no_body(self):
|
||||
@ -509,34 +483,13 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
req.body = jsonutils.dump_as_bytes({})
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
|
||||
consistencygroup = objects.ConsistencyGroup.get_by_id(
|
||||
consistencygroup = objects.Group.get_by_id(
|
||||
self.ctxt, consistencygroup.id)
|
||||
self.assertEqual(http_client.ACCEPTED, res.status_int)
|
||||
self.assertEqual('deleting', consistencygroup.status)
|
||||
|
||||
consistencygroup.destroy()
|
||||
|
||||
def test_delete_consistencygroup_available_used_as_source(self):
|
||||
consistencygroup = self._create_consistencygroup(
|
||||
status=fields.ConsistencyGroupStatus.AVAILABLE)
|
||||
req = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' %
|
||||
(fake.PROJECT_ID, consistencygroup.id))
|
||||
cg2 = self._create_consistencygroup(
|
||||
status=fields.ConsistencyGroupStatus.CREATING,
|
||||
source_cgid=consistencygroup.id)
|
||||
req.method = 'POST'
|
||||
req.headers['Content-Type'] = 'application/json'
|
||||
req.body = jsonutils.dump_as_bytes({})
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
|
||||
consistencygroup = objects.ConsistencyGroup.get_by_id(
|
||||
self.ctxt, consistencygroup.id)
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertEqual('available', consistencygroup.status)
|
||||
|
||||
consistencygroup.destroy()
|
||||
cg2.destroy()
|
||||
|
||||
def test_delete_consistencygroup_available_used_as_source_success(self):
|
||||
consistencygroup = self._create_consistencygroup(
|
||||
status=fields.ConsistencyGroupStatus.AVAILABLE)
|
||||
@ -552,7 +505,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
req.body = jsonutils.dump_as_bytes({})
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
|
||||
consistencygroup = objects.ConsistencyGroup.get_by_id(
|
||||
consistencygroup = objects.Group.get_by_id(
|
||||
self.ctxt, consistencygroup.id)
|
||||
self.assertEqual(http_client.ACCEPTED, res.status_int)
|
||||
self.assertEqual('deleting', consistencygroup.status)
|
||||
@ -571,7 +524,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app(
|
||||
fake_auth_context=self.user_ctxt))
|
||||
|
||||
consistencygroup = objects.ConsistencyGroup.get_by_id(
|
||||
consistencygroup = objects.Group.get_by_id(
|
||||
self.ctxt, consistencygroup.id)
|
||||
self.assertEqual(http_client.ACCEPTED, res.status_int)
|
||||
self.assertEqual(fields.ConsistencyGroupStatus.DELETING,
|
||||
@ -613,7 +566,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
req.body = jsonutils.dump_as_bytes(body)
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
|
||||
consistencygroup = objects.ConsistencyGroup.get_by_id(
|
||||
consistencygroup = objects.Group.get_by_id(
|
||||
self.ctxt, consistencygroup.id)
|
||||
self.assertEqual(http_client.ACCEPTED, res.status_int)
|
||||
self.assertEqual('deleting', consistencygroup.status)
|
||||
@ -632,7 +585,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
fake_auth_context=self.user_ctxt))
|
||||
self.assertEqual(http_client.ACCEPTED, res.status_int)
|
||||
|
||||
cg = objects.ConsistencyGroup.get_by_id(
|
||||
cg = objects.Group.get_by_id(
|
||||
context.get_admin_context(read_deleted='yes'),
|
||||
consistencygroup.id)
|
||||
self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg.status)
|
||||
@ -641,13 +594,16 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
def test_create_delete_consistencygroup_update_quota(self):
|
||||
name = 'mycg'
|
||||
description = 'consistency group 1'
|
||||
fake_type = {'id': fake.CONSISTENCY_GROUP_ID, 'name': 'fake_type'}
|
||||
fake_grp_type = {'id': fake.GROUP_TYPE_ID, 'name': 'fake_grp_type'}
|
||||
fake_vol_type = {'id': fake.VOLUME_TYPE_ID, 'name': 'fake_vol_type'}
|
||||
self.mock_object(db, 'group_type_get',
|
||||
return_value=fake_grp_type)
|
||||
self.mock_object(db, 'volume_types_get_by_name_or_id',
|
||||
return_value=[fake_type])
|
||||
self.mock_object(self.cg_api, '_cast_create_consistencygroup')
|
||||
return_value=[fake_vol_type])
|
||||
self.mock_object(self.cg_api, '_cast_create_group')
|
||||
self.mock_object(self.cg_api, 'update_quota')
|
||||
cg = self.cg_api.create(self.ctxt, name, description,
|
||||
fake_type['name'])
|
||||
fake.GROUP_TYPE_ID, fake_vol_type['name'])
|
||||
self.cg_api.update_quota.assert_called_once_with(
|
||||
self.ctxt, cg, 1)
|
||||
|
||||
@ -659,7 +615,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
|
||||
self.cg_api.update_quota.assert_called_once_with(
|
||||
self.ctxt, cg, -1, self.ctxt.project_id)
|
||||
cg = objects.ConsistencyGroup.get_by_id(
|
||||
cg = objects.Group.get_by_id(
|
||||
context.get_admin_context(read_deleted='yes'),
|
||||
cg.id)
|
||||
self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg.status)
|
||||
@ -716,26 +672,14 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
|
||||
if force:
|
||||
reason = _('Consistency group must not have attached volumes, '
|
||||
'volumes with snapshots, or dependent cgsnapshots')
|
||||
else:
|
||||
reason = _('Consistency group status must be available or '
|
||||
'error and must not have volumes or dependent '
|
||||
'cgsnapshots')
|
||||
msg = (_('Invalid ConsistencyGroup: Cannot delete consistency group '
|
||||
'%(id)s. %(reason)s, and it cannot be the source for an '
|
||||
'ongoing CG or CG Snapshot creation.')
|
||||
% {'id': cg_id, 'reason': reason})
|
||||
|
||||
res_dict = jsonutils.loads(res.body)
|
||||
self.assertEqual(http_client.BAD_REQUEST,
|
||||
res_dict['badRequest']['code'])
|
||||
self.assertEqual(msg, res_dict['badRequest']['message'])
|
||||
self.assertIsNotNone(res_dict['badRequest']['message'])
|
||||
|
||||
def test_delete_consistencygroup_with_volumes(self):
|
||||
consistencygroup = self._create_consistencygroup(status='available')
|
||||
utils.create_volume(self.ctxt, consistencygroup_id=consistencygroup.id,
|
||||
utils.create_volume(self.ctxt, group_id=consistencygroup.id,
|
||||
testcase_instance=self)
|
||||
self._assert_deleting_result_400(consistencygroup.id)
|
||||
consistencygroup.destroy()
|
||||
@ -743,10 +687,14 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
def test_delete_consistencygroup_with_cgsnapshot(self):
|
||||
consistencygroup = self._create_consistencygroup(status='available')
|
||||
# If we don't add a volume to the CG the cgsnapshot creation will fail
|
||||
utils.create_volume(self.ctxt,
|
||||
consistencygroup_id=consistencygroup.id,
|
||||
testcase_instance=self)
|
||||
cg_snap = utils.create_cgsnapshot(self.ctxt, consistencygroup.id)
|
||||
vol = utils.create_volume(self.ctxt,
|
||||
group_id=consistencygroup.id,
|
||||
testcase_instance=self)
|
||||
cg_snap = utils.create_group_snapshot(self.ctxt, consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID)
|
||||
utils.create_snapshot(self.ctxt, volume_id=vol.id,
|
||||
group_snapshot_id=cg_snap.id,
|
||||
testcase_instance=self)
|
||||
self._assert_deleting_result_400(consistencygroup.id)
|
||||
cg_snap.destroy()
|
||||
consistencygroup.destroy()
|
||||
@ -754,10 +702,14 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
def test_delete_consistencygroup_with_cgsnapshot_force(self):
|
||||
consistencygroup = self._create_consistencygroup(status='available')
|
||||
# If we don't add a volume to the CG the cgsnapshot creation will fail
|
||||
utils.create_volume(self.ctxt,
|
||||
consistencygroup_id=consistencygroup.id,
|
||||
testcase_instance=self)
|
||||
cg_snap = utils.create_cgsnapshot(self.ctxt, consistencygroup.id)
|
||||
vol = utils.create_volume(self.ctxt,
|
||||
group_id=consistencygroup.id,
|
||||
testcase_instance=self)
|
||||
cg_snap = utils.create_group_snapshot(self.ctxt, consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID)
|
||||
utils.create_snapshot(self.ctxt, volume_id=vol.id,
|
||||
group_snapshot_id=cg_snap.id,
|
||||
testcase_instance=self)
|
||||
self._assert_deleting_result_400(consistencygroup.id, force=True)
|
||||
cg_snap.destroy()
|
||||
consistencygroup.destroy()
|
||||
@ -775,27 +727,12 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
req.body = jsonutils.dump_as_bytes(body)
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
|
||||
consistencygroup = objects.ConsistencyGroup.get_by_id(
|
||||
consistencygroup = objects.Group.get_by_id(
|
||||
self.ctxt, consistencygroup.id)
|
||||
self.assertEqual(http_client.ACCEPTED, res.status_int)
|
||||
self.assertEqual('deleting', consistencygroup.status)
|
||||
consistencygroup.destroy()
|
||||
|
||||
def test_delete_consistencygroup_force_with_attached_volumes(self):
|
||||
consistencygroup = self._create_consistencygroup(status='available')
|
||||
utils.create_volume(self.ctxt, consistencygroup_id=consistencygroup.id,
|
||||
testcase_instance=self, attach_status='attached')
|
||||
self._assert_deleting_result_400(consistencygroup.id, force=True)
|
||||
consistencygroup.destroy()
|
||||
|
||||
def test_delete_consistencygroup_force_with_volumes_with_snapshots(self):
|
||||
consistencygroup = self._create_consistencygroup(status='available')
|
||||
vol = utils.create_volume(self.ctxt, testcase_instance=self,
|
||||
consistencygroup_id=consistencygroup.id)
|
||||
utils.create_snapshot(self.ctxt, vol.id)
|
||||
self._assert_deleting_result_400(consistencygroup.id, force=True)
|
||||
consistencygroup.destroy()
|
||||
|
||||
def test_delete_cg_force_with_volumes_with_deleted_snapshots(self):
|
||||
consistencygroup = self._create_consistencygroup(status='available')
|
||||
vol = utils.create_volume(self.ctxt, testcase_instance=self,
|
||||
@ -811,7 +748,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
req.body = jsonutils.dump_as_bytes(body)
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
|
||||
consistencygroup = objects.ConsistencyGroup.get_by_id(
|
||||
consistencygroup = objects.Group.get_by_id(
|
||||
self.ctxt, consistencygroup.id)
|
||||
self.assertEqual(http_client.ACCEPTED, res.status_int)
|
||||
self.assertEqual('deleting', consistencygroup.status)
|
||||
@ -833,16 +770,20 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertEqual(http_client.BAD_REQUEST,
|
||||
res_dict['badRequest']['code'])
|
||||
msg = (_('volume_types must be provided to create '
|
||||
'consistency group %s.') % name)
|
||||
self.assertEqual(msg, res_dict['badRequest']['message'])
|
||||
self.assertIsNotNone(res_dict['badRequest']['message'])
|
||||
|
||||
@mock.patch(
|
||||
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
|
||||
def test_update_consistencygroup_success(self, mock_validate):
|
||||
volume_type_id = fake.VOLUME_TYPE_ID
|
||||
volume_type_id = utils.create_volume_type(
|
||||
context.get_admin_context(), self, name='my_vol_type')['id']
|
||||
fake_grp_type = {'id': fake.GROUP_TYPE_ID, 'name': 'fake_grp_type'}
|
||||
self.mock_object(db, 'group_type_get',
|
||||
return_value=fake_grp_type)
|
||||
consistencygroup = self._create_consistencygroup(
|
||||
status=fields.ConsistencyGroupStatus.AVAILABLE,
|
||||
volume_type_ids=[volume_type_id],
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
host='test_host')
|
||||
|
||||
# We create another CG from the one we are updating to confirm that
|
||||
@ -850,28 +791,32 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
cg2 = self._create_consistencygroup(
|
||||
status=fields.ConsistencyGroupStatus.AVAILABLE,
|
||||
host='test_host',
|
||||
source_cgid=consistencygroup.id)
|
||||
volume_type_ids=[volume_type_id],
|
||||
source_group_id=consistencygroup.id,)
|
||||
|
||||
remove_volume_id = utils.create_volume(
|
||||
self.ctxt,
|
||||
testcase_instance=self,
|
||||
volume_type_id=volume_type_id,
|
||||
consistencygroup_id=consistencygroup.id)['id']
|
||||
group_id=consistencygroup.id)['id']
|
||||
remove_volume_id2 = utils.create_volume(
|
||||
self.ctxt,
|
||||
testcase_instance=self,
|
||||
volume_type_id=volume_type_id,
|
||||
consistencygroup_id=consistencygroup.id,
|
||||
group_id=consistencygroup.id,
|
||||
status='error')['id']
|
||||
remove_volume_id3 = utils.create_volume(
|
||||
self.ctxt,
|
||||
testcase_instance=self,
|
||||
volume_type_id=volume_type_id,
|
||||
consistencygroup_id=consistencygroup.id,
|
||||
group_id=consistencygroup.id,
|
||||
status='error_deleting')['id']
|
||||
|
||||
self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE,
|
||||
consistencygroup.status)
|
||||
|
||||
cg_volumes = db.volume_get_all_by_group(self.ctxt.elevated(),
|
||||
consistencygroup.id)
|
||||
cg_volumes = db.volume_get_all_by_generic_group(self.ctxt.elevated(),
|
||||
consistencygroup.id)
|
||||
cg_vol_ids = [cg_vol['id'] for cg_vol in cg_volumes]
|
||||
self.assertIn(remove_volume_id, cg_vol_ids)
|
||||
self.assertIn(remove_volume_id2, cg_vol_ids)
|
||||
@ -879,9 +824,11 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
|
||||
add_volume_id = utils.create_volume(
|
||||
self.ctxt,
|
||||
testcase_instance=self,
|
||||
volume_type_id=volume_type_id)['id']
|
||||
add_volume_id2 = utils.create_volume(
|
||||
self.ctxt,
|
||||
testcase_instance=self,
|
||||
volume_type_id=volume_type_id)['id']
|
||||
req = webob.Request.blank('/v2/%s/consistencygroups/%s/update' %
|
||||
(fake.PROJECT_ID, consistencygroup.id))
|
||||
@ -900,7 +847,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app(
|
||||
fake_auth_context=self.user_ctxt))
|
||||
|
||||
consistencygroup = objects.ConsistencyGroup.get_by_id(
|
||||
consistencygroup = objects.Group.get_by_id(
|
||||
self.ctxt, consistencygroup.id)
|
||||
self.assertEqual(http_client.ACCEPTED, res.status_int)
|
||||
self.assertTrue(mock_validate.called)
|
||||
@ -945,7 +892,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
req.body = jsonutils.dump_as_bytes(body)
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
|
||||
consistencygroup = objects.ConsistencyGroup.get_by_id(
|
||||
consistencygroup = objects.Group.get_by_id(
|
||||
self.ctxt, consistencygroup.id)
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE,
|
||||
@ -990,7 +937,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
req.body = jsonutils.dump_as_bytes(body)
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
|
||||
consistencygroup = objects.ConsistencyGroup.get_by_id(
|
||||
consistencygroup = objects.Group.get_by_id(
|
||||
self.ctxt, consistencygroup.id)
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE,
|
||||
@ -1019,11 +966,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertEqual(http_client.BAD_REQUEST,
|
||||
res_dict['badRequest']['code'])
|
||||
msg = (_("Invalid volume: Cannot add volume fake-volume-uuid "
|
||||
"to consistency group %(group_id)s because volume cannot "
|
||||
"be found.") %
|
||||
{'group_id': consistencygroup.id})
|
||||
self.assertEqual(msg, res_dict['badRequest']['message'])
|
||||
self.assertIsNotNone(res_dict['badRequest']['message'])
|
||||
|
||||
consistencygroup.destroy()
|
||||
|
||||
@ -1047,11 +990,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertEqual(http_client.BAD_REQUEST,
|
||||
res_dict['badRequest']['code'])
|
||||
msg = (_("Invalid volume: Cannot remove volume fake-volume-uuid "
|
||||
"from consistency group %(group_id)s because it is not "
|
||||
"in the group.") %
|
||||
{'group_id': consistencygroup.id})
|
||||
self.assertEqual(msg, res_dict['badRequest']['message'])
|
||||
self.assertIsNotNone(res_dict['badRequest']['message'])
|
||||
|
||||
consistencygroup.destroy()
|
||||
|
||||
@ -1104,14 +1043,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertEqual(http_client.BAD_REQUEST,
|
||||
res_dict['badRequest']['code'])
|
||||
msg = (_("Invalid volume: Cannot add volume %(volume_id)s "
|
||||
"to consistency group %(group_id)s because volume is in an "
|
||||
"invalid state: %(status)s. Valid states are: ('available', "
|
||||
"'in-use').") %
|
||||
{'volume_id': add_volume_id,
|
||||
'group_id': consistencygroup.id,
|
||||
'status': 'wrong_status'})
|
||||
self.assertEqual(msg, res_dict['badRequest']['message'])
|
||||
self.assertIsNotNone(res_dict['badRequest']['message'])
|
||||
|
||||
consistencygroup.destroy()
|
||||
|
||||
@ -1140,13 +1072,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertEqual(http_client.BAD_REQUEST,
|
||||
res_dict['badRequest']['code'])
|
||||
msg = (_("Invalid volume: Cannot add volume %(volume_id)s "
|
||||
"to consistency group %(group_id)s because volume type "
|
||||
"%(volume_type)s is not supported by the group.") %
|
||||
{'volume_id': add_volume_id,
|
||||
'group_id': consistencygroup.id,
|
||||
'volume_type': wrong_type})
|
||||
self.assertEqual(msg, res_dict['badRequest']['message'])
|
||||
self.assertIsNotNone(res_dict['badRequest']['message'])
|
||||
|
||||
consistencygroup.destroy()
|
||||
|
||||
@ -1198,30 +1124,29 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertEqual(http_client.BAD_REQUEST,
|
||||
res_dict['badRequest']['code'])
|
||||
msg = (_("Invalid ConsistencyGroup: Cannot update consistency group "
|
||||
"%s, status must be available, and it cannot be the source "
|
||||
"for an ongoing CG or CG Snapshot creation.")
|
||||
% consistencygroup.id)
|
||||
self.assertEqual(msg, res_dict['badRequest']['message'])
|
||||
self.assertIsNotNone(res_dict['badRequest']['message'])
|
||||
|
||||
consistencygroup.destroy()
|
||||
|
||||
@mock.patch('cinder.quota.QuotaEngine.limit_check')
|
||||
@mock.patch(
|
||||
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
|
||||
def test_create_consistencygroup_from_src(self, mock_validate, mock_quota):
|
||||
def test_create_consistencygroup_from_src_snap(self, mock_validate):
|
||||
self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create)
|
||||
|
||||
consistencygroup = utils.create_consistencygroup(self.ctxt)
|
||||
consistencygroup = utils.create_group(
|
||||
self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[fake.VOLUME_TYPE_ID],)
|
||||
volume_id = utils.create_volume(
|
||||
self.ctxt,
|
||||
consistencygroup_id=consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_cgsnapshot(
|
||||
self.ctxt, consistencygroup_id=consistencygroup.id)
|
||||
volume_type_id=fake.VOLUME_TYPE_ID,
|
||||
group_id=consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_group_snapshot(
|
||||
self.ctxt, group_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID)
|
||||
snapshot = utils.create_snapshot(
|
||||
self.ctxt,
|
||||
volume_id,
|
||||
cgsnapshot_id=cgsnapshot.id,
|
||||
group_snapshot_id=cgsnapshot.id,
|
||||
status=fields.SnapshotStatus.AVAILABLE)
|
||||
|
||||
test_cg_name = 'test cg'
|
||||
@ -1243,7 +1168,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
self.assertEqual(test_cg_name, res_dict['consistencygroup']['name'])
|
||||
self.assertTrue(mock_validate.called)
|
||||
|
||||
cg_ref = objects.ConsistencyGroup.get_by_id(
|
||||
cg_ref = objects.Group.get_by_id(
|
||||
self.ctxt.elevated(), res_dict['consistencygroup']['id'])
|
||||
|
||||
cg_ref.destroy()
|
||||
@ -1252,14 +1177,16 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
consistencygroup.destroy()
|
||||
cgsnapshot.destroy()
|
||||
|
||||
@mock.patch('cinder.quota.QuotaEngine.limit_check')
|
||||
def test_create_consistencygroup_from_src_cg(self, mock_quota):
|
||||
def test_create_consistencygroup_from_src_cg(self):
|
||||
self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create)
|
||||
|
||||
source_cg = utils.create_consistencygroup(self.ctxt)
|
||||
source_cg = utils.create_group(
|
||||
self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[fake.VOLUME_TYPE_ID],)
|
||||
|
||||
volume_id = utils.create_volume(
|
||||
self.ctxt,
|
||||
consistencygroup_id=source_cg.id)['id']
|
||||
group_id=source_cg.id)['id']
|
||||
|
||||
test_cg_name = 'test cg'
|
||||
body = {"consistencygroup-from-src": {"name": test_cg_name,
|
||||
@ -1279,7 +1206,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
self.assertIn('id', res_dict['consistencygroup'])
|
||||
self.assertEqual(test_cg_name, res_dict['consistencygroup']['name'])
|
||||
|
||||
cg = objects.ConsistencyGroup.get_by_id(
|
||||
cg = objects.Group.get_by_id(
|
||||
self.ctxt, res_dict['consistencygroup']['id'])
|
||||
cg.destroy()
|
||||
db.volume_destroy(self.ctxt.elevated(), volume_id)
|
||||
@ -1288,17 +1215,21 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
def test_create_consistencygroup_from_src_both_snap_cg(self):
|
||||
self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create)
|
||||
|
||||
consistencygroup = utils.create_consistencygroup(self.ctxt)
|
||||
consistencygroup = utils.create_group(
|
||||
self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[fake.VOLUME_TYPE_ID],)
|
||||
|
||||
volume_id = utils.create_volume(
|
||||
self.ctxt,
|
||||
consistencygroup_id=consistencygroup.id)['id']
|
||||
cgsnapshot_id = utils.create_cgsnapshot(
|
||||
group_id=consistencygroup.id)['id']
|
||||
cgsnapshot_id = utils.create_group_snapshot(
|
||||
self.ctxt,
|
||||
consistencygroup_id=consistencygroup.id)['id']
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
group_id=consistencygroup.id)['id']
|
||||
snapshot = utils.create_snapshot(
|
||||
self.ctxt,
|
||||
volume_id,
|
||||
cgsnapshot_id=cgsnapshot_id,
|
||||
group_snapshot_id=cgsnapshot_id,
|
||||
status=fields.SnapshotStatus.AVAILABLE)
|
||||
|
||||
test_cg_name = 'test cg'
|
||||
@ -1367,16 +1298,19 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
self.assertIsNotNone(res_dict['badRequest']['message'])
|
||||
|
||||
def test_create_consistencygroup_from_src_no_host(self):
|
||||
consistencygroup = utils.create_consistencygroup(self.ctxt, host=None)
|
||||
consistencygroup = utils.create_group(
|
||||
self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[fake.VOLUME_TYPE_ID], host=None)
|
||||
volume_id = utils.create_volume(
|
||||
self.ctxt,
|
||||
consistencygroup_id=consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_cgsnapshot(
|
||||
self.ctxt, consistencygroup_id=consistencygroup.id)
|
||||
group_id=consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_group_snapshot(
|
||||
self.ctxt, group_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,)
|
||||
snapshot = utils.create_snapshot(
|
||||
self.ctxt,
|
||||
volume_id,
|
||||
cgsnapshot_id=cgsnapshot.id,
|
||||
group_snapshot_id=cgsnapshot.id,
|
||||
status=fields.SnapshotStatus.AVAILABLE)
|
||||
|
||||
test_cg_name = 'test cg'
|
||||
@ -1396,8 +1330,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertEqual(http_client.BAD_REQUEST,
|
||||
res_dict['badRequest']['code'])
|
||||
msg = _('Invalid ConsistencyGroup: No host to create consistency '
|
||||
'group')
|
||||
msg = _('Invalid Group: No host to create group')
|
||||
self.assertIn(msg, res_dict['badRequest']['message'])
|
||||
|
||||
snapshot.destroy()
|
||||
@ -1406,13 +1339,15 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
cgsnapshot.destroy()
|
||||
|
||||
def test_create_consistencygroup_from_src_cgsnapshot_empty(self):
|
||||
consistencygroup = utils.create_consistencygroup(self.ctxt)
|
||||
consistencygroup = utils.create_group(
|
||||
self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[fake.VOLUME_TYPE_ID],)
|
||||
volume_id = utils.create_volume(
|
||||
self.ctxt,
|
||||
consistencygroup_id=consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_cgsnapshot(
|
||||
self.ctxt,
|
||||
consistencygroup_id=consistencygroup.id)
|
||||
group_id=consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_group_snapshot(
|
||||
self.ctxt, group_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,)
|
||||
|
||||
test_cg_name = 'test cg'
|
||||
body = {"consistencygroup-from-src": {"name": test_cg_name,
|
||||
@ -1438,7 +1373,9 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
cgsnapshot.destroy()
|
||||
|
||||
def test_create_consistencygroup_from_src_source_cg_empty(self):
|
||||
source_cg = utils.create_consistencygroup(self.ctxt)
|
||||
source_cg = utils.create_group(
|
||||
self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[fake.VOLUME_TYPE_ID],)
|
||||
|
||||
test_cg_name = 'test cg'
|
||||
body = {"consistencygroup-from-src": {"name": test_cg_name,
|
||||
@ -1462,10 +1399,12 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
source_cg.destroy()
|
||||
|
||||
def test_create_consistencygroup_from_src_cgsnapshot_notfound(self):
|
||||
consistencygroup = utils.create_consistencygroup(self.ctxt)
|
||||
consistencygroup = utils.create_group(
|
||||
self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[fake.VOLUME_TYPE_ID],)
|
||||
volume_id = utils.create_volume(
|
||||
self.ctxt,
|
||||
consistencygroup_id=consistencygroup.id)['id']
|
||||
group_id=consistencygroup.id)['id']
|
||||
|
||||
test_cg_name = 'test cg'
|
||||
body = {
|
||||
@ -1517,22 +1456,24 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
res_dict['itemNotFound']['code'])
|
||||
self.assertIsNotNone(res_dict['itemNotFound']['message'])
|
||||
|
||||
@mock.patch('cinder.quota.QuotaEngine.limit_check')
|
||||
@mock.patch.object(volume_api.API, 'create',
|
||||
side_effect=exception.CinderException(
|
||||
'Create volume failed.'))
|
||||
def test_create_consistencygroup_from_src_cgsnapshot_create_volume_failed(
|
||||
self, mock_create, mock_quota):
|
||||
consistencygroup = utils.create_consistencygroup(self.ctxt)
|
||||
self, mock_create):
|
||||
consistencygroup = utils.create_group(
|
||||
self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[fake.VOLUME_TYPE_ID],)
|
||||
volume_id = utils.create_volume(
|
||||
self.ctxt,
|
||||
consistencygroup_id=consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_cgsnapshot(
|
||||
self.ctxt, consistencygroup_id=consistencygroup.id)
|
||||
group_id=consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_group_snapshot(
|
||||
self.ctxt, group_id=consistencygroup.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID,)
|
||||
snapshot = utils.create_snapshot(
|
||||
self.ctxt,
|
||||
volume_id,
|
||||
cgsnapshot_id=cgsnapshot.id,
|
||||
group_snapshot_id=cgsnapshot.id,
|
||||
status=fields.SnapshotStatus.AVAILABLE)
|
||||
|
||||
test_cg_name = 'test cg'
|
||||
@ -1560,16 +1501,17 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
consistencygroup.destroy()
|
||||
cgsnapshot.destroy()
|
||||
|
||||
@mock.patch('cinder.quota.QuotaEngine.limit_check')
|
||||
@mock.patch.object(volume_api.API, 'create',
|
||||
side_effect=exception.CinderException(
|
||||
'Create volume failed.'))
|
||||
def test_create_consistencygroup_from_src_cg_create_volume_failed(
|
||||
self, mock_create, mock_quota):
|
||||
source_cg = utils.create_consistencygroup(self.ctxt)
|
||||
self, mock_create):
|
||||
source_cg = utils.create_group(
|
||||
self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[fake.VOLUME_TYPE_ID],)
|
||||
volume_id = utils.create_volume(
|
||||
self.ctxt,
|
||||
consistencygroup_id=source_cg.id)['id']
|
||||
group_id=source_cg.id)['id']
|
||||
|
||||
test_cg_name = 'test cg'
|
||||
body = {"consistencygroup-from-src": {"name": test_cg_name,
|
||||
@ -1592,88 +1534,3 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
|
||||
db.volume_destroy(self.ctxt.elevated(), volume_id)
|
||||
source_cg.destroy()
|
||||
|
||||
@mock.patch('cinder.quota.QuotaEngine.limit_check')
|
||||
def test_create_consistencygroup_from_src_cg_over_quota(self, mock_quota):
|
||||
self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create)
|
||||
|
||||
source_cg = utils.create_consistencygroup(self.ctxt)
|
||||
volume_id = utils.create_volume(
|
||||
self.ctxt,
|
||||
consistencygroup_id=source_cg.id)['id']
|
||||
|
||||
mock_quota.side_effect = exception.OverQuota(
|
||||
overs=10, quotas='volumes', usages={})
|
||||
|
||||
test_cg_name = 'test cg'
|
||||
body = {"consistencygroup-from-src": {"name": test_cg_name,
|
||||
"description":
|
||||
"Consistency Group 1",
|
||||
"source_cgid": source_cg.id}}
|
||||
req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' %
|
||||
fake.PROJECT_ID)
|
||||
req.method = 'POST'
|
||||
req.headers['Content-Type'] = 'application/json'
|
||||
req.body = jsonutils.dump_as_bytes(body)
|
||||
res = req.get_response(fakes.wsgi_app(
|
||||
fake_auth_context=self.user_ctxt))
|
||||
res_dict = jsonutils.loads(res.body)
|
||||
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertIn('message', res_dict['badRequest'])
|
||||
|
||||
cg = objects.ConsistencyGroupList.get_all(self.ctxt)
|
||||
# The new cg has been deleted already.
|
||||
self.assertEqual(1, len(cg))
|
||||
|
||||
db.volume_destroy(self.ctxt.elevated(), volume_id)
|
||||
source_cg.destroy()
|
||||
|
||||
@mock.patch('cinder.quota.QuotaEngine.limit_check')
|
||||
@mock.patch(
|
||||
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
|
||||
def test_create_consistencygroup_from_src_cgsnapshot_over_quota(
|
||||
self, mock_validate, mock_quota):
|
||||
self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create)
|
||||
|
||||
consistencygroup = utils.create_consistencygroup(self.ctxt)
|
||||
volume_id = utils.create_volume(
|
||||
self.ctxt,
|
||||
consistencygroup_id=consistencygroup.id)['id']
|
||||
cgsnapshot = utils.create_cgsnapshot(
|
||||
self.ctxt, consistencygroup_id=consistencygroup.id)
|
||||
snapshot = utils.create_snapshot(
|
||||
self.ctxt,
|
||||
volume_id,
|
||||
cgsnapshot_id=cgsnapshot.id,
|
||||
status=fields.SnapshotStatus.AVAILABLE)
|
||||
|
||||
mock_quota.side_effect = exception.OverQuota(
|
||||
overs=10, quotas='volumes', usages={})
|
||||
|
||||
test_cg_name = 'test cg'
|
||||
body = {"consistencygroup-from-src": {"name": test_cg_name,
|
||||
"description":
|
||||
"Consistency Group 1",
|
||||
"cgsnapshot_id": cgsnapshot.id}}
|
||||
req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' %
|
||||
fake.PROJECT_ID)
|
||||
req.method = 'POST'
|
||||
req.headers['Content-Type'] = 'application/json'
|
||||
req.body = jsonutils.dump_as_bytes(body)
|
||||
res = req.get_response(fakes.wsgi_app(
|
||||
fake_auth_context=self.user_ctxt))
|
||||
res_dict = jsonutils.loads(res.body)
|
||||
|
||||
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
|
||||
self.assertIn('message', res_dict['badRequest'])
|
||||
self.assertTrue(mock_validate.called)
|
||||
|
||||
cg = objects.ConsistencyGroupList.get_all(self.ctxt)
|
||||
# The new cg has been deleted already.
|
||||
self.assertEqual(1, len(cg))
|
||||
|
||||
snapshot.destroy()
|
||||
db.volume_destroy(self.ctxt.elevated(), volume_id)
|
||||
consistencygroup.destroy()
|
||||
cgsnapshot.destroy()
|
||||
|
@ -238,7 +238,7 @@ def fake_snapshot_get(self, context, snapshot_id):
|
||||
|
||||
|
||||
def fake_consistencygroup_get_notfound(self, context, cg_id):
|
||||
raise exc.ConsistencyGroupNotFound(consistencygroup_id=cg_id)
|
||||
raise exc.GroupNotFound(group_id=cg_id)
|
||||
|
||||
|
||||
def fake_volume_type_get(context, id, *args, **kwargs):
|
||||
|
@ -28,10 +28,10 @@ import webob
|
||||
from cinder.api import common
|
||||
from cinder.api import extensions
|
||||
from cinder.api.v2 import volumes
|
||||
from cinder import consistencygroup as consistencygroupAPI
|
||||
from cinder import context
|
||||
from cinder import db
|
||||
from cinder import exception
|
||||
from cinder import group as groupAPI
|
||||
from cinder import objects
|
||||
from cinder.objects import fields
|
||||
from cinder import test
|
||||
@ -222,6 +222,7 @@ class VolumeApiTest(test.TestCase):
|
||||
'snapshot': snapshot,
|
||||
'source_volume': source_volume,
|
||||
'source_replica': None,
|
||||
'group': None,
|
||||
'consistencygroup': None,
|
||||
'availability_zone': availability_zone,
|
||||
'scheduler_hints': None,
|
||||
@ -372,7 +373,7 @@ class VolumeApiTest(test.TestCase):
|
||||
get_volume.assert_called_once_with(self.controller.volume_api,
|
||||
context, source_replica)
|
||||
|
||||
@mock.patch.object(consistencygroupAPI.API, 'get', autospec=True)
|
||||
@mock.patch.object(groupAPI.API, 'get', autospec=True)
|
||||
def test_volume_creation_fails_with_invalid_consistency_group(self,
|
||||
get_cg):
|
||||
|
||||
@ -388,7 +389,7 @@ class VolumeApiTest(test.TestCase):
|
||||
self.controller.create, req, body)
|
||||
|
||||
context = req.environ['cinder.context']
|
||||
get_cg.assert_called_once_with(self.controller.consistencygroup_api,
|
||||
get_cg.assert_called_once_with(self.controller.group_api,
|
||||
context, consistencygroup_id)
|
||||
|
||||
def test_volume_creation_fails_with_bad_size(self):
|
||||
|
@ -16,7 +16,6 @@ import webob
|
||||
|
||||
from cinder.api.openstack import api_version_request as api_version
|
||||
from cinder.api.v3 import consistencygroups
|
||||
import cinder.consistencygroup
|
||||
from cinder import context
|
||||
from cinder import objects
|
||||
from cinder.objects import fields
|
||||
@ -31,7 +30,6 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(ConsistencyGroupsAPITestCase, self).setUp()
|
||||
self.cg_api = cinder.consistencygroup.API()
|
||||
self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
|
||||
auth_token=True,
|
||||
is_admin=True)
|
||||
@ -44,20 +42,22 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
ctxt=None,
|
||||
name='test_consistencygroup',
|
||||
description='this is a test consistency group',
|
||||
volume_type_id=fake.VOLUME_TYPE_ID,
|
||||
group_type_id=fake.GROUP_TYPE_ID,
|
||||
volume_type_ids=[fake.VOLUME_TYPE_ID],
|
||||
availability_zone='az1',
|
||||
host='fakehost',
|
||||
status=fields.ConsistencyGroupStatus.CREATING,
|
||||
**kwargs):
|
||||
"""Create a consistency group object."""
|
||||
ctxt = ctxt or self.ctxt
|
||||
consistencygroup = objects.ConsistencyGroup(ctxt)
|
||||
consistencygroup = objects.Group(ctxt)
|
||||
consistencygroup.user_id = fake.USER_ID
|
||||
consistencygroup.project_id = fake.PROJECT_ID
|
||||
consistencygroup.availability_zone = availability_zone
|
||||
consistencygroup.name = name
|
||||
consistencygroup.description = description
|
||||
consistencygroup.volume_type_id = volume_type_id
|
||||
consistencygroup.group_type_id = group_type_id
|
||||
consistencygroup.volume_type_ids = volume_type_ids
|
||||
consistencygroup.host = host
|
||||
consistencygroup.status = status
|
||||
consistencygroup.update(kwargs)
|
||||
@ -81,7 +81,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
|
||||
res_dict = self.controller.update(req,
|
||||
consistencygroup.id,
|
||||
body)
|
||||
consistencygroup = objects.ConsistencyGroup.get_by_id(
|
||||
consistencygroup = objects.Group.get_by_id(
|
||||
self.ctxt, consistencygroup.id)
|
||||
self.assertEqual(202, res_dict.status_int)
|
||||
self.assertEqual("", consistencygroup.name)
|
||||
|
@ -1,778 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ddt
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
|
||||
import cinder.consistencygroup
|
||||
from cinder import context
|
||||
from cinder import db
|
||||
from cinder import exception
|
||||
from cinder import objects
|
||||
from cinder.objects import fields
|
||||
from cinder import quota
|
||||
from cinder.tests.unit import conf_fixture
|
||||
from cinder.tests.unit import fake_constants as fake
|
||||
from cinder.tests.unit import fake_snapshot
|
||||
from cinder.tests.unit import fake_volume
|
||||
from cinder.tests.unit import utils as tests_utils
|
||||
from cinder.tests.unit import volume as base
|
||||
import cinder.volume
|
||||
from cinder.volume import driver
|
||||
from cinder.volume import utils as volutils
|
||||
|
||||
CGQUOTAS = quota.CGQUOTAS
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class ConsistencyGroupTestCase(base.BaseVolumeTestCase):
|
||||
def test_delete_volume_in_consistency_group(self):
|
||||
"""Test deleting a volume that's tied to a consistency group fails."""
|
||||
consistencygroup_id = fake.CONSISTENCY_GROUP_ID
|
||||
volume_api = cinder.volume.api.API()
|
||||
self.volume_params.update({'status': 'available',
|
||||
'consistencygroup_id': consistencygroup_id})
|
||||
volume = tests_utils.create_volume(self.context, **self.volume_params)
|
||||
self.assertRaises(exception.InvalidVolume,
|
||||
volume_api.delete, self.context, volume)
|
||||
|
||||
    @mock.patch.object(CGQUOTAS, "reserve",
                       return_value=["RESERVATION"])
    @mock.patch.object(CGQUOTAS, "commit")
    @mock.patch.object(CGQUOTAS, "rollback")
    @mock.patch.object(driver.VolumeDriver,
                       "delete_consistencygroup",
                       return_value=({'status': (
                           fields.ConsistencyGroupStatus.DELETED)}, []))
    def test_create_delete_consistencygroup(self, fake_delete_cg,
                                            fake_rollback,
                                            fake_commit, fake_reserve):
        """Test consistencygroup can be created and deleted."""

        def fake_driver_create_cg(context, group):
            """Make sure that the pool is part of the host."""
            self.assertIn('host', group)
            host = group.host
            pool = volutils.extract_host(host, level='pool')
            self.assertEqual('fakepool', pool)
            return {'status': 'available'}

        # Replace the driver-level create so we can assert on the host/pool
        # the manager hands down, instead of touching a real backend.
        self.mock_object(self.volume.driver, 'create_consistencygroup',
                         fake_driver_create_cg)

        group = tests_utils.create_consistencygroup(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            volume_type='type1,type2',
            host='fakehost@fakedrv#fakepool')
        group = objects.ConsistencyGroup.get_by_id(self.context, group.id)
        # No notifications should have been emitted by DB-only setup.
        self.assertEqual(0, len(self.notifier.notifications),
                         self.notifier.notifications)
        self.volume.create_consistencygroup(self.context, group)
        # Create emits exactly start + end notifications.
        self.assertEqual(2, len(self.notifier.notifications),
                         self.notifier.notifications)
        msg = self.notifier.notifications[0]
        self.assertEqual('consistencygroup.create.start', msg['event_type'])
        expected = {
            'status': fields.ConsistencyGroupStatus.AVAILABLE,
            'name': 'test_cg',
            'availability_zone': 'nova',
            'tenant_id': self.context.project_id,
            'created_at': mock.ANY,
            'user_id': fake.USER_ID,
            'consistencygroup_id': group.id
        }
        self.assertDictEqual(expected, msg['payload'])
        msg = self.notifier.notifications[1]
        self.assertEqual('consistencygroup.create.end', msg['event_type'])
        self.assertDictEqual(expected, msg['payload'])
        self.assertEqual(
            group.id,
            objects.ConsistencyGroup.get_by_id(context.get_admin_context(),
                                               group.id).id)

        self.volume.delete_consistencygroup(self.context, group)
        # The row is soft-deleted: visible with read_deleted='yes' only.
        cg = objects.ConsistencyGroup.get_by_id(
            context.get_admin_context(read_deleted='yes'), group.id)
        self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg.status)
        # Delete adds its own start + end, for four notifications total.
        self.assertEqual(4, len(self.notifier.notifications),
                         self.notifier.notifications)
        msg = self.notifier.notifications[2]
        self.assertEqual('consistencygroup.delete.start', msg['event_type'])
        self.assertDictEqual(expected, msg['payload'])
        msg = self.notifier.notifications[3]
        self.assertEqual('consistencygroup.delete.end', msg['event_type'])
        # The delete.end payload reports the final DELETED status.
        expected['status'] = fields.ConsistencyGroupStatus.DELETED
        self.assertDictEqual(expected, msg['payload'])
        self.assertRaises(exception.NotFound,
                          objects.ConsistencyGroup.get_by_id,
                          self.context,
                          group.id)
|
||||
|
||||
    @mock.patch.object(CGQUOTAS, "reserve",
                       return_value=["RESERVATION"])
    @mock.patch.object(CGQUOTAS, "commit")
    @mock.patch.object(CGQUOTAS, "rollback")
    @mock.patch.object(driver.VolumeDriver,
                       "create_consistencygroup",
                       return_value={'status': 'available'})
    @mock.patch.object(driver.VolumeDriver,
                       "update_consistencygroup")
    def test_update_consistencygroup(self, fake_update_cg,
                                     fake_create_cg, fake_rollback,
                                     fake_commit, fake_reserve):
        """Test consistencygroup can be updated."""
        group = tests_utils.create_consistencygroup(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            volume_type='type1,type2')
        self.volume.create_consistencygroup(self.context, group)

        # volume starts inside the group; volume2 starts outside it.
        volume = tests_utils.create_volume(
            self.context,
            consistencygroup_id=group.id,
            **self.volume_params)
        self.volume.create_volume(self.context, volume)

        volume2 = tests_utils.create_volume(
            self.context,
            consistencygroup_id=None,
            **self.volume_params)
        self.volume.create_volume(self.context, volume2)

        # Driver reports: group stays available, volume2 added,
        # volume removed (model_update, add_volumes_update,
        # remove_volumes_update).
        fake_update_cg.return_value = (
            {'status': fields.ConsistencyGroupStatus.AVAILABLE},
            [{'id': volume2.id, 'status': 'available'}],
            [{'id': volume.id, 'status': 'available'}])

        self.volume.update_consistencygroup(self.context, group,
                                            add_volumes=volume2.id,
                                            remove_volumes=volume.id)
        cg = objects.ConsistencyGroup.get_by_id(self.context, group.id)
        expected = {
            'status': fields.ConsistencyGroupStatus.AVAILABLE,
            'name': 'test_cg',
            'availability_zone': 'nova',
            'tenant_id': self.context.project_id,
            'created_at': mock.ANY,
            'user_id': fake.USER_ID,
            'consistencygroup_id': group.id
        }
        self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg.status)
        # Indices 6 and 8 are the update start/end among the ten
        # notifications accumulated so far (CG create, 2x volume create,
        # update).
        self.assertEqual(10, len(self.notifier.notifications),
                         self.notifier.notifications)
        msg = self.notifier.notifications[6]
        self.assertEqual('consistencygroup.update.start', msg['event_type'])
        self.assertDictEqual(expected, msg['payload'])
        msg = self.notifier.notifications[8]
        self.assertEqual('consistencygroup.update.end', msg['event_type'])
        self.assertDictEqual(expected, msg['payload'])
        cgvolumes = db.volume_get_all_by_group(self.context, group.id)
        cgvol_ids = [cgvol['id'] for cgvol in cgvolumes]
        # Verify volume is removed.
        self.assertNotIn(volume.id, cgvol_ids)
        # Verify volume is added.
        self.assertIn(volume2.id, cgvol_ids)

        self.volume_params['status'] = 'wrong-status'
        volume3 = tests_utils.create_volume(
            self.context,
            consistencygroup_id=None,
            **self.volume_params)
        volume_id3 = volume3['id']

        # Temporarily stub db.volume_get so the manager sees a volume in a
        # bad state; restored below to avoid leaking into other tests.
        volume_get_orig = self.volume.db.volume_get
        self.volume.db.volume_get = mock.Mock(
            return_value={'status': 'wrong_status',
                          'id': volume_id3})
        # Try to add a volume in wrong status
        self.assertRaises(exception.InvalidVolume,
                          self.volume.update_consistencygroup,
                          self.context,
                          group,
                          add_volumes=volume_id3,
                          remove_volumes=None)
        self.volume.db.volume_get.reset_mock()
        self.volume.db.volume_get = volume_get_orig
|
||||
|
||||
def test_update_consistencygroup_volume_not_found(self):
|
||||
group = tests_utils.create_consistencygroup(
|
||||
self.context,
|
||||
availability_zone=CONF.storage_availability_zone,
|
||||
volume_type='type1,type2')
|
||||
self.assertRaises(exception.VolumeNotFound,
|
||||
self.volume.update_consistencygroup,
|
||||
self.context,
|
||||
group,
|
||||
fake.VOLUME_ID)
|
||||
self.assertRaises(exception.VolumeNotFound,
|
||||
self.volume.update_consistencygroup,
|
||||
self.context,
|
||||
group,
|
||||
None,
|
||||
fake.VOLUME_ID)
|
||||
|
||||
    @mock.patch.object(driver.VolumeDriver,
                       "create_consistencygroup",
                       return_value={'status': 'available'})
    @mock.patch.object(driver.VolumeDriver,
                       "delete_consistencygroup",
                       return_value=({'status': 'deleted'}, []))
    @mock.patch.object(driver.VolumeDriver,
                       "create_cgsnapshot",
                       return_value={'status': 'available'})
    @mock.patch.object(driver.VolumeDriver,
                       "delete_cgsnapshot",
                       return_value=({'status': 'deleted'}, []))
    @mock.patch.object(driver.VolumeDriver,
                       "create_consistencygroup_from_src",
                       return_value=(None, None))
    @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
                'create_volume_from_snapshot')
    @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
                'create_cloned_volume')
    def test_create_consistencygroup_from_src(self,
                                              mock_create_cloned_vol,
                                              mock_create_vol_from_snap,
                                              mock_create_from_src,
                                              mock_delete_cgsnap,
                                              mock_create_cgsnap,
                                              mock_delete_cg,
                                              mock_create_cg):
        """Test consistencygroup can be created from a source.

        Covers both source kinds: from a CG snapshot and from another CG.
        """
        # Source group with one volume, plus a cgsnapshot of that volume.
        group = tests_utils.create_consistencygroup(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            volume_type='type1,type2',
            status=fields.ConsistencyGroupStatus.AVAILABLE)
        volume = tests_utils.create_volume(
            self.context,
            consistencygroup_id=group.id,
            status='available',
            host=CONF.host,
            size=1)
        volume_id = volume['id']
        cgsnapshot_returns = self._create_cgsnapshot(group.id, [volume_id])
        cgsnapshot = cgsnapshot_returns[0]
        snapshot_id = cgsnapshot_returns[1][0]['id']

        # Create CG from source CG snapshot.
        group2 = tests_utils.create_consistencygroup(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            volume_type='type1,type2',
            cgsnapshot_id=cgsnapshot.id)
        group2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
        volume2 = tests_utils.create_volume(
            self.context,
            consistencygroup_id=group2.id,
            snapshot_id=snapshot_id,
            **self.volume_params)
        self.volume.create_volume(self.context, volume2)
        self.volume.create_consistencygroup_from_src(
            self.context, group2, cgsnapshot=cgsnapshot)
        cg2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
        expected = {
            'status': fields.ConsistencyGroupStatus.AVAILABLE,
            'name': 'test_cg',
            'availability_zone': 'nova',
            'tenant_id': self.context.project_id,
            'created_at': mock.ANY,
            'user_id': fake.USER_ID,
            'consistencygroup_id': group2.id,
        }
        self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg2.status)
        self.assertEqual(group2.id, cg2['id'])
        # Only cgsnapshot_id must be set when created from a snapshot.
        self.assertEqual(cgsnapshot.id, cg2['cgsnapshot_id'])
        self.assertIsNone(cg2['source_cgid'])

        # Notifications 0-1 belong to volume2's create; 2 and 4 are the
        # CG-from-src create start/end.
        msg = self.notifier.notifications[2]
        self.assertEqual('consistencygroup.create.start', msg['event_type'])
        self.assertDictEqual(expected, msg['payload'])
        msg = self.notifier.notifications[4]
        self.assertEqual('consistencygroup.create.end', msg['event_type'])
        self.assertDictEqual(expected, msg['payload'])

        # On count mismatch, surface the first unexpected notification in
        # the failure message before asserting the exact count.
        if len(self.notifier.notifications) > 6:
            self.assertFalse(self.notifier.notifications[6],
                             self.notifier.notifications)
        self.assertEqual(6, len(self.notifier.notifications),
                         self.notifier.notifications)

        self.volume.delete_consistencygroup(self.context, group2)

        if len(self.notifier.notifications) > 10:
            self.assertFalse(self.notifier.notifications[10],
                             self.notifier.notifications)
        self.assertEqual(10, len(self.notifier.notifications),
                         self.notifier.notifications)

        msg = self.notifier.notifications[6]
        self.assertEqual('consistencygroup.delete.start', msg['event_type'])
        expected['status'] = fields.ConsistencyGroupStatus.AVAILABLE
        self.assertDictEqual(expected, msg['payload'])
        msg = self.notifier.notifications[8]
        self.assertEqual('consistencygroup.delete.end', msg['event_type'])
        expected['status'] = fields.ConsistencyGroupStatus.DELETED
        self.assertDictEqual(expected, msg['payload'])

        # Soft-deleted row is still readable with read_deleted='yes'.
        cg2 = objects.ConsistencyGroup.get_by_id(
            context.get_admin_context(read_deleted='yes'), group2.id)
        self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg2.status)
        self.assertRaises(exception.NotFound,
                          objects.ConsistencyGroup.get_by_id,
                          self.context,
                          group2.id)

        # Create CG from source CG.
        group3 = tests_utils.create_consistencygroup(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            volume_type='type1,type2',
            source_cgid=group.id)
        volume3 = tests_utils.create_volume(
            self.context,
            consistencygroup_id=group3.id,
            source_volid=volume_id,
            **self.volume_params)
        self.volume.create_volume(self.context, volume3)
        self.volume.create_consistencygroup_from_src(
            self.context, group3, source_cg=group)

        cg3 = objects.ConsistencyGroup.get_by_id(self.context, group3.id)

        self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg3.status)
        self.assertEqual(group3.id, cg3.id)
        # Only source_cgid must be set when created from another CG.
        self.assertEqual(group.id, cg3.source_cgid)
        self.assertIsNone(cg3.cgsnapshot_id)

        self.volume.delete_cgsnapshot(self.context, cgsnapshot)

        self.volume.delete_consistencygroup(self.context, group)
|
||||
|
||||
def test_create_consistencygroup_from_src_frozen(self):
|
||||
service = tests_utils.create_service(self.context, {'frozen': True})
|
||||
cg = tests_utils.create_consistencygroup(self.context,
|
||||
host=service.host)
|
||||
cg_api = cinder.consistencygroup.api.API()
|
||||
self.assertRaises(exception.InvalidInput,
|
||||
cg_api.create_from_src,
|
||||
self.context, 'cg', 'desc', cgsnapshot_id=None,
|
||||
source_cgid=cg.id)
|
||||
|
||||
def test_delete_consistencygroup_frozen(self):
|
||||
service = tests_utils.create_service(self.context, {'frozen': True})
|
||||
cg = tests_utils.create_consistencygroup(self.context,
|
||||
host=service.host)
|
||||
cg_api = cinder.consistencygroup.api.API()
|
||||
self.assertRaises(exception.InvalidInput,
|
||||
cg_api.delete, self.context, cg)
|
||||
|
||||
def test_create_cgsnapshot_frozen(self):
|
||||
service = tests_utils.create_service(self.context, {'frozen': True})
|
||||
cg = tests_utils.create_consistencygroup(self.context,
|
||||
host=service.host)
|
||||
cg_api = cinder.consistencygroup.api.API()
|
||||
self.assertRaises(exception.InvalidInput,
|
||||
cg_api.create_cgsnapshot,
|
||||
self.context, cg, 'cg', 'desc')
|
||||
|
||||
def test_delete_cgsnapshot_frozen(self):
|
||||
service = tests_utils.create_service(self.context, {'frozen': True})
|
||||
cg = tests_utils.create_consistencygroup(self.context,
|
||||
host=service.host)
|
||||
cgsnap = tests_utils.create_cgsnapshot(self.context, cg.id)
|
||||
cg_api = cinder.consistencygroup.api.API()
|
||||
self.assertRaises(exception.InvalidInput,
|
||||
cg_api.delete_cgsnapshot,
|
||||
self.context, cgsnap)
|
||||
|
||||
def test_sort_snapshots(self):
|
||||
vol1 = {'id': fake.VOLUME_ID, 'name': 'volume 1',
|
||||
'snapshot_id': fake.SNAPSHOT_ID,
|
||||
'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}
|
||||
vol2 = {'id': fake.VOLUME2_ID, 'name': 'volume 2',
|
||||
'snapshot_id': fake.SNAPSHOT2_ID,
|
||||
'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}
|
||||
vol3 = {'id': fake.VOLUME3_ID, 'name': 'volume 3',
|
||||
'snapshot_id': fake.SNAPSHOT3_ID,
|
||||
'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}
|
||||
snp1 = {'id': fake.SNAPSHOT_ID, 'name': 'snap 1',
|
||||
'cgsnapshot_id': fake.CONSISTENCY_GROUP_ID}
|
||||
snp2 = {'id': fake.SNAPSHOT2_ID, 'name': 'snap 2',
|
||||
'cgsnapshot_id': fake.CONSISTENCY_GROUP_ID}
|
||||
snp3 = {'id': fake.SNAPSHOT3_ID, 'name': 'snap 3',
|
||||
'cgsnapshot_id': fake.CONSISTENCY_GROUP_ID}
|
||||
snp1_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp1)
|
||||
snp2_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp2)
|
||||
snp3_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp3)
|
||||
volumes = []
|
||||
snapshots = []
|
||||
volumes.append(vol1)
|
||||
volumes.append(vol2)
|
||||
volumes.append(vol3)
|
||||
snapshots.append(snp2_obj)
|
||||
snapshots.append(snp3_obj)
|
||||
snapshots.append(snp1_obj)
|
||||
i = 0
|
||||
for vol in volumes:
|
||||
snap = snapshots[i]
|
||||
i += 1
|
||||
self.assertNotEqual(vol['snapshot_id'], snap.id)
|
||||
sorted_snaps = self.volume._sort_snapshots(volumes, snapshots)
|
||||
i = 0
|
||||
for vol in volumes:
|
||||
snap = sorted_snaps[i]
|
||||
i += 1
|
||||
self.assertEqual(vol['snapshot_id'], snap.id)
|
||||
|
||||
snapshots[2]['id'] = fake.WILL_NOT_BE_FOUND_ID
|
||||
self.assertRaises(exception.SnapshotNotFound,
|
||||
self.volume._sort_snapshots,
|
||||
volumes, snapshots)
|
||||
|
||||
self.assertRaises(exception.InvalidInput,
|
||||
self.volume._sort_snapshots,
|
||||
volumes, [])
|
||||
|
||||
def test_sort_source_vols(self):
|
||||
vol1 = {'id': '1', 'name': 'volume 1',
|
||||
'source_volid': '1',
|
||||
'consistencygroup_id': '2'}
|
||||
vol2 = {'id': '2', 'name': 'volume 2',
|
||||
'source_volid': '2',
|
||||
'consistencygroup_id': '2'}
|
||||
vol3 = {'id': '3', 'name': 'volume 3',
|
||||
'source_volid': '3',
|
||||
'consistencygroup_id': '2'}
|
||||
src_vol1 = {'id': '1', 'name': 'source vol 1',
|
||||
'consistencygroup_id': '1'}
|
||||
src_vol2 = {'id': '2', 'name': 'source vol 2',
|
||||
'consistencygroup_id': '1'}
|
||||
src_vol3 = {'id': '3', 'name': 'source vol 3',
|
||||
'consistencygroup_id': '1'}
|
||||
volumes = []
|
||||
src_vols = []
|
||||
volumes.append(vol1)
|
||||
volumes.append(vol2)
|
||||
volumes.append(vol3)
|
||||
src_vols.append(src_vol2)
|
||||
src_vols.append(src_vol3)
|
||||
src_vols.append(src_vol1)
|
||||
i = 0
|
||||
for vol in volumes:
|
||||
src_vol = src_vols[i]
|
||||
i += 1
|
||||
self.assertNotEqual(vol['source_volid'], src_vol['id'])
|
||||
sorted_src_vols = self.volume._sort_source_vols(volumes, src_vols)
|
||||
i = 0
|
||||
for vol in volumes:
|
||||
src_vol = sorted_src_vols[i]
|
||||
i += 1
|
||||
self.assertEqual(vol['source_volid'], src_vol['id'])
|
||||
|
||||
src_vols[2]['id'] = '9999'
|
||||
self.assertRaises(exception.VolumeNotFound,
|
||||
self.volume._sort_source_vols,
|
||||
volumes, src_vols)
|
||||
|
||||
self.assertRaises(exception.InvalidInput,
|
||||
self.volume._sort_source_vols,
|
||||
volumes, [])
|
||||
|
||||
def _create_cgsnapshot(self, group_id, volume_ids, size='0'):
|
||||
"""Create a cgsnapshot object."""
|
||||
cgsnap = objects.CGSnapshot(self.context)
|
||||
cgsnap.user_id = fake.USER_ID
|
||||
cgsnap.project_id = fake.PROJECT_ID
|
||||
cgsnap.consistencygroup_id = group_id
|
||||
cgsnap.status = "creating"
|
||||
cgsnap.create()
|
||||
|
||||
# Create snapshot list
|
||||
for volume_id in volume_ids:
|
||||
snaps = []
|
||||
snap = objects.Snapshot(context.get_admin_context())
|
||||
snap.volume_size = size
|
||||
snap.user_id = fake.USER_ID
|
||||
snap.project_id = fake.PROJECT_ID
|
||||
snap.volume_id = volume_id
|
||||
snap.status = fields.SnapshotStatus.AVAILABLE
|
||||
snap.cgsnapshot_id = cgsnap.id
|
||||
snap.create()
|
||||
snaps.append(snap)
|
||||
|
||||
return cgsnap, snaps
|
||||
|
||||
    # Run twice: once against a plain host and once against a clustered
    # service whose host does not match CONF.host.
    @ddt.data((CONF.host, None), (CONF.host + 'fake', 'mycluster'))
    @ddt.unpack
    @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
    @mock.patch('cinder.volume.driver.VolumeDriver.create_consistencygroup',
                autospec=True,
                return_value={'status': 'available'})
    @mock.patch('cinder.volume.driver.VolumeDriver.delete_consistencygroup',
                autospec=True,
                return_value=({'status': 'deleted'}, []))
    @mock.patch('cinder.volume.driver.VolumeDriver.create_cgsnapshot',
                autospec=True,
                return_value=({'status': 'available'}, []))
    @mock.patch('cinder.volume.driver.VolumeDriver.delete_cgsnapshot',
                autospec=True,
                return_value=({'status': 'deleted'}, []))
    def test_create_delete_cgsnapshot(self, host, cluster,
                                      mock_del_cgsnap, mock_create_cgsnap,
                                      mock_del_cg, _mock_create_cg,
                                      mock_notify):
        """Test cgsnapshot can be created and deleted."""

        self.volume.cluster = cluster
        group = tests_utils.create_consistencygroup(
            self.context,
            host=host,
            cluster_name=cluster,
            availability_zone=CONF.storage_availability_zone,
            volume_type='type1,type2')
        self.volume_params['host'] = host
        volume = tests_utils.create_volume(
            self.context,
            cluster_name=cluster,
            consistencygroup_id=group.id,
            **self.volume_params)
        self.volume.create_volume(self.context, volume)

        self.assert_notify_called(mock_notify,
                                  (['INFO', 'volume.create.start'],
                                   ['INFO', 'volume.create.end']))

        cgsnapshot_returns = self._create_cgsnapshot(group.id, [volume.id])
        cgsnapshot = cgsnapshot_returns[0]
        self.volume.create_cgsnapshot(self.context, cgsnapshot)
        self.assertEqual(cgsnapshot.id,
                         objects.CGSnapshot.get_by_id(
                             context.get_admin_context(),
                             cgsnapshot.id).id)

        # Creating the cgsnapshot adds both cgsnapshot and per-snapshot
        # notifications on top of the earlier volume ones.
        self.assert_notify_called(mock_notify,
                                  (['INFO', 'volume.create.start'],
                                   ['INFO', 'volume.create.end'],
                                   ['INFO', 'cgsnapshot.create.start'],
                                   ['INFO', 'snapshot.create.start'],
                                   ['INFO', 'cgsnapshot.create.end'],
                                   ['INFO', 'snapshot.create.end']))

        self.volume.delete_cgsnapshot(self.context, cgsnapshot)

        self.assert_notify_called(mock_notify,
                                  (['INFO', 'volume.create.start'],
                                   ['INFO', 'volume.create.end'],
                                   ['INFO', 'cgsnapshot.create.start'],
                                   ['INFO', 'snapshot.create.start'],
                                   ['INFO', 'cgsnapshot.create.end'],
                                   ['INFO', 'snapshot.create.end'],
                                   ['INFO', 'cgsnapshot.delete.start'],
                                   ['INFO', 'snapshot.delete.start'],
                                   ['INFO', 'cgsnapshot.delete.end'],
                                   ['INFO', 'snapshot.delete.end']))

        # Soft-deleted row is still readable with read_deleted='yes'.
        cgsnap = objects.CGSnapshot.get_by_id(
            context.get_admin_context(read_deleted='yes'),
            cgsnapshot.id)
        self.assertEqual('deleted', cgsnap.status)
        self.assertRaises(exception.NotFound,
                          objects.CGSnapshot.get_by_id,
                          self.context,
                          cgsnapshot.id)

        self.volume.delete_consistencygroup(self.context, group)

        self.assertTrue(mock_create_cgsnap.called)
        self.assertTrue(mock_del_cgsnap.called)
        self.assertTrue(mock_del_cg.called)
|
||||
|
||||
@mock.patch('cinder.volume.driver.VolumeDriver.create_consistencygroup',
|
||||
return_value={'status': 'available'})
|
||||
@mock.patch('cinder.volume.driver.VolumeDriver.delete_consistencygroup',
|
||||
return_value=({'status': 'deleted'}, []))
|
||||
def test_delete_consistencygroup_correct_host(self,
|
||||
mock_del_cg,
|
||||
_mock_create_cg):
|
||||
"""Test consistencygroup can be deleted.
|
||||
|
||||
Test consistencygroup can be deleted when volumes are on
|
||||
the correct volume node.
|
||||
"""
|
||||
|
||||
group = tests_utils.create_consistencygroup(
|
||||
self.context,
|
||||
availability_zone=CONF.storage_availability_zone,
|
||||
volume_type='type1,type2')
|
||||
|
||||
volume = tests_utils.create_volume(
|
||||
self.context,
|
||||
consistencygroup_id=group.id,
|
||||
host='host1@backend1#pool1',
|
||||
status='creating',
|
||||
size=1)
|
||||
self.volume.host = 'host1@backend1'
|
||||
self.volume.create_volume(self.context, volume)
|
||||
|
||||
self.volume.delete_consistencygroup(self.context, group)
|
||||
cg = objects.ConsistencyGroup.get_by_id(
|
||||
context.get_admin_context(read_deleted='yes'),
|
||||
group.id)
|
||||
self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg.status)
|
||||
self.assertRaises(exception.NotFound,
|
||||
objects.ConsistencyGroup.get_by_id,
|
||||
self.context,
|
||||
group.id)
|
||||
|
||||
self.assertTrue(mock_del_cg.called)
|
||||
|
||||
    @mock.patch('cinder.volume.driver.VolumeDriver.create_consistencygroup',
                mock.Mock(return_value={'status': 'available'}))
    @mock.patch('cinder.volume.driver.VolumeDriver.delete_consistencygroup',
                return_value=({'status': 'deleted'}, []))
    def test_delete_consistencygroup_cluster(self, mock_del_cg):
        """Test consistencygroup can be deleted.

        Test consistencygroup can be deleted when the group and its
        volumes belong to the same cluster as the volume service, even
        though their hosts differ from the service's host.
        """
        cluster_name = 'cluster@backend1'
        # Host deliberately differs from the group/volume hosts; matching
        # is expected to happen on cluster_name instead.
        self.volume.host = 'host2@backend1'
        self.volume.cluster = cluster_name
        group = tests_utils.create_consistencygroup(
            self.context,
            host=CONF.host + 'fake',
            cluster_name=cluster_name,
            availability_zone=CONF.storage_availability_zone,
            volume_type='type1,type2')

        volume = tests_utils.create_volume(
            self.context,
            consistencygroup_id=group.id,
            host='host1@backend1#pool1',
            cluster_name=cluster_name,
            status='creating',
            size=1)
        self.volume.create_volume(self.context, volume)

        self.volume.delete_consistencygroup(self.context, group)
        # Soft-deleted row is still readable with read_deleted='yes'.
        cg = objects.ConsistencyGroup.get_by_id(
            context.get_admin_context(read_deleted='yes'),
            group.id)
        self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg.status)
        self.assertRaises(exception.NotFound,
                          objects.ConsistencyGroup.get_by_id,
                          self.context,
                          group.id)

        self.assertTrue(mock_del_cg.called)
|
||||
|
||||
@mock.patch('cinder.volume.driver.VolumeDriver.create_consistencygroup',
|
||||
return_value={'status': 'available'})
|
||||
def test_delete_consistencygroup_wrong_host(self, *_mock_create_cg):
|
||||
"""Test consistencygroup cannot be deleted.
|
||||
|
||||
Test consistencygroup cannot be deleted when volumes in the
|
||||
group are not local to the volume node.
|
||||
"""
|
||||
|
||||
group = tests_utils.create_consistencygroup(
|
||||
self.context,
|
||||
availability_zone=CONF.storage_availability_zone,
|
||||
volume_type='type1,type2')
|
||||
|
||||
volume = tests_utils.create_volume(
|
||||
self.context,
|
||||
consistencygroup_id=group.id,
|
||||
host='host1@backend1#pool1',
|
||||
status='creating',
|
||||
size=1)
|
||||
self.volume.host = 'host1@backend2'
|
||||
self.volume.create_volume(self.context, volume)
|
||||
|
||||
self.assertRaises(exception.Invalid,
|
||||
self.volume.delete_consistencygroup,
|
||||
self.context,
|
||||
group)
|
||||
cg = objects.ConsistencyGroup.get_by_id(self.context, group.id)
|
||||
# Group is not deleted
|
||||
self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg.status)
|
||||
|
||||
def test_create_volume_with_consistencygroup_invalid_type(self):
|
||||
"""Test volume creation with ConsistencyGroup & invalid volume type."""
|
||||
vol_type = db.volume_type_create(
|
||||
context.get_admin_context(),
|
||||
dict(name=conf_fixture.def_vol_type, extra_specs={})
|
||||
)
|
||||
vol_type = objects.VolumeType.get_by_id(self.context,
|
||||
vol_type.id)
|
||||
cg = objects.ConsistencyGroup(self.context,
|
||||
id=fake.CONSISTENCY_GROUP_ID,
|
||||
name='cg1',
|
||||
volume_type_id=vol_type.id)
|
||||
fake_type = fake_volume.fake_volume_type_obj(
|
||||
self.context,
|
||||
id=fake.VOLUME_TYPE_ID,
|
||||
name='fake')
|
||||
vol_api = cinder.volume.api.API()
|
||||
|
||||
# Volume type must be provided when creating a volume in a
|
||||
# consistency group.
|
||||
self.assertRaises(exception.InvalidInput,
|
||||
vol_api.create,
|
||||
self.context, 1, 'vol1', 'volume 1',
|
||||
consistencygroup=cg)
|
||||
|
||||
# Volume type must be valid.
|
||||
self.assertRaises(exception.InvalidInput,
|
||||
vol_api.create,
|
||||
self.context, 1, 'vol1', 'volume 1',
|
||||
volume_type=fake_type,
|
||||
consistencygroup=cg)
|
||||
|
||||
    @mock.patch('cinder.volume.driver.VolumeDriver.create_cgsnapshot',
                autospec=True,
                return_value=({'status': 'available'}, []))
    def test_create_cgsnapshot_with_bootable_volumes(self, mock_create_cgsnap):
        """Test cgsnapshot can be created for a CG with a bootable volume."""

        group = tests_utils.create_consistencygroup(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            volume_type='type1,type2')
        # Create a common (non-bootable) volume in the group.
        volume = tests_utils.create_volume(
            self.context,
            consistencygroup_id=group.id,
            **self.volume_params)
        self.volume.create_volume(self.context, volume)
        # Create a bootable volume
        bootable_vol_params = {'status': 'creating', 'host': CONF.host,
                               'size': 1, 'bootable': True}
        bootable_vol = tests_utils.create_volume(self.context,
                                                 consistencygroup_id=group.id,
                                                 **bootable_vol_params)
        self.volume.create_volume(self.context, bootable_vol)

        # Snapshot the group containing both volumes and verify the
        # cgsnapshot is persisted and the driver was invoked.
        volume_ids = [volume.id, bootable_vol.id]
        cgsnapshot_returns = self._create_cgsnapshot(group.id, volume_ids)
        cgsnapshot = cgsnapshot_returns[0]
        self.volume.create_cgsnapshot(self.context, cgsnapshot)
        self.assertEqual(cgsnapshot.id,
                         objects.CGSnapshot.get_by_id(
                             context.get_admin_context(),
                             cgsnapshot.id).id)
        self.assertTrue(mock_create_cgsnap.called)
|
@ -77,7 +77,7 @@ class FakeHostManager(host_manager.HostManager):
|
||||
'reserved_percentage': 5,
|
||||
'volume_backend_name': 'lvm4',
|
||||
'timestamp': UTC_NOW,
|
||||
'consistencygroup_support': True},
|
||||
'consistent_group_snapshot_enabled': True},
|
||||
'host5': {'total_capacity_gb': 'infinite',
|
||||
'free_capacity_gb': 'unknown',
|
||||
'allocated_capacity_gb': 1548,
|
||||
|
@ -16,6 +16,7 @@
|
||||
Tests For Filter Scheduler.
|
||||
"""
|
||||
|
||||
import ddt
|
||||
import mock
|
||||
|
||||
from cinder import context
|
||||
@ -29,6 +30,7 @@ from cinder.tests.unit.scheduler import test_scheduler
|
||||
from cinder.volume import utils
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
||||
"""Test case for Filter Scheduler."""
|
||||
|
||||
@ -56,8 +58,12 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
||||
fake_context, 'faki-id1', group_spec,
|
||||
request_spec_list, {}, [])
|
||||
|
||||
@ddt.data(
|
||||
{'capabilities:consistent_group_snapshot_enabled': '<is> True'},
|
||||
{'consistent_group_snapshot_enabled': '<is> True'}
|
||||
)
|
||||
@mock.patch('cinder.db.service_get_all')
|
||||
def test_schedule_group(self, _mock_service_get_all):
|
||||
def test_schedule_group(self, specs, _mock_service_get_all):
|
||||
# Make sure _schedule_group() can find host successfully.
|
||||
sched = fakes.FakeFilterScheduler()
|
||||
sched.host_manager = fakes.FakeHostManager()
|
||||
@ -66,7 +72,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
||||
|
||||
fakes.mock_host_manager_db_calls(_mock_service_get_all)
|
||||
|
||||
specs = {'capabilities:consistencygroup_support': '<is> True'}
|
||||
request_spec = {'volume_properties': {'project_id': 1,
|
||||
'size': 0},
|
||||
'volume_type': {'name': 'Type1',
|
||||
@ -86,80 +91,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
||||
self.assertIsNotNone(weighed_host.obj)
|
||||
self.assertTrue(_mock_service_get_all.called)
|
||||
|
||||
def test_create_consistencygroup_no_hosts(self):
|
||||
# Ensure empty hosts result in NoValidBackend exception.
|
||||
sched = fakes.FakeFilterScheduler()
|
||||
|
||||
fake_context = context.RequestContext('user', 'project')
|
||||
request_spec = {'volume_properties': {'project_id': 1,
|
||||
'size': 0},
|
||||
'volume_type': {'name': 'Type1',
|
||||
'extra_specs': {}}}
|
||||
request_spec2 = {'volume_properties': {'project_id': 1,
|
||||
'size': 0},
|
||||
'volume_type': {'name': 'Type2',
|
||||
'extra_specs': {}}}
|
||||
request_spec_list = [request_spec, request_spec2]
|
||||
self.assertRaises(exception.NoValidBackend,
|
||||
sched.schedule_create_consistencygroup,
|
||||
fake_context, 'faki-id1', request_spec_list, {})
|
||||
|
||||
@mock.patch('cinder.db.service_get_all')
|
||||
def test_schedule_consistencygroup(self,
|
||||
_mock_service_get_all):
|
||||
# Make sure _schedule_group() can find host successfully.
|
||||
sched = fakes.FakeFilterScheduler()
|
||||
sched.host_manager = fakes.FakeHostManager()
|
||||
fake_context = context.RequestContext('user', 'project',
|
||||
is_admin=True)
|
||||
|
||||
fakes.mock_host_manager_db_calls(_mock_service_get_all)
|
||||
|
||||
specs = {'capabilities:consistencygroup_support': '<is> True'}
|
||||
request_spec = {'volume_properties': {'project_id': 1,
|
||||
'size': 0},
|
||||
'volume_type': {'name': 'Type1',
|
||||
'extra_specs': specs}}
|
||||
request_spec2 = {'volume_properties': {'project_id': 1,
|
||||
'size': 0},
|
||||
'volume_type': {'name': 'Type2',
|
||||
'extra_specs': specs}}
|
||||
request_spec_list = [request_spec, request_spec2]
|
||||
weighed_host = sched._schedule_group(fake_context,
|
||||
request_spec_list,
|
||||
{})
|
||||
self.assertIsNotNone(weighed_host.obj)
|
||||
self.assertTrue(_mock_service_get_all.called)
|
||||
|
||||
@mock.patch('cinder.db.service_get_all')
|
||||
def test_schedule_consistencygroup_no_cg_support_in_extra_specs(
|
||||
self,
|
||||
_mock_service_get_all):
|
||||
# Make sure _schedule_group() can find host successfully even
|
||||
# when consistencygroup_support is not specified in volume type's
|
||||
# extra specs
|
||||
sched = fakes.FakeFilterScheduler()
|
||||
sched.host_manager = fakes.FakeHostManager()
|
||||
fake_context = context.RequestContext('user', 'project',
|
||||
is_admin=True)
|
||||
|
||||
fakes.mock_host_manager_db_calls(_mock_service_get_all)
|
||||
|
||||
request_spec = {'volume_properties': {'project_id': 1,
|
||||
'size': 0},
|
||||
'volume_type': {'name': 'Type1',
|
||||
'extra_specs': {}}}
|
||||
request_spec2 = {'volume_properties': {'project_id': 1,
|
||||
'size': 0},
|
||||
'volume_type': {'name': 'Type2',
|
||||
'extra_specs': {}}}
|
||||
request_spec_list = [request_spec, request_spec2]
|
||||
weighed_host = sched._schedule_group(fake_context,
|
||||
request_spec_list,
|
||||
{})
|
||||
self.assertIsNotNone(weighed_host.obj)
|
||||
self.assertTrue(_mock_service_get_all.called)
|
||||
|
||||
def test_create_volume_no_hosts(self):
|
||||
# Ensure empty hosts/child_zones result in NoValidBackend exception.
|
||||
sched = fakes.FakeFilterScheduler()
|
||||
@ -284,39 +215,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
||||
weighed_host = sched._schedule(fake_context, request_spec, {})
|
||||
self.assertEqual('host1#lvm1', weighed_host.obj.host)
|
||||
|
||||
@mock.patch('cinder.db.service_get_all')
|
||||
def test_create_volume_clear_host_different_with_cg(self,
|
||||
_mock_service_get_all):
|
||||
# Ensure we clear those hosts whose backend is not same as
|
||||
# consistencygroup's backend.
|
||||
sched = fakes.FakeFilterScheduler()
|
||||
sched.host_manager = fakes.FakeHostManager()
|
||||
fakes.mock_host_manager_db_calls(_mock_service_get_all)
|
||||
fake_context = context.RequestContext('user', 'project')
|
||||
request_spec = {'volume_properties': {'project_id': 1,
|
||||
'size': 1},
|
||||
'volume_type': {'name': 'LVM_iSCSI'},
|
||||
'CG_backend': 'host@lvmdriver'}
|
||||
request_spec = objects.RequestSpec.from_primitives(request_spec)
|
||||
weighed_host = sched._schedule(fake_context, request_spec, {})
|
||||
self.assertIsNone(weighed_host)
|
||||
|
||||
@mock.patch('cinder.db.service_get_all')
|
||||
def test_create_volume_host_same_as_cg(self, _mock_service_get_all):
|
||||
# Ensure we don't clear the host whose backend is same as
|
||||
# consistencygroup's backend.
|
||||
sched = fakes.FakeFilterScheduler()
|
||||
sched.host_manager = fakes.FakeHostManager()
|
||||
fakes.mock_host_manager_db_calls(_mock_service_get_all)
|
||||
fake_context = context.RequestContext('user', 'project')
|
||||
request_spec = {'volume_properties': {'project_id': 1,
|
||||
'size': 1},
|
||||
'volume_type': {'name': 'LVM_iSCSI'},
|
||||
'CG_backend': 'host1'}
|
||||
request_spec = objects.RequestSpec.from_primitives(request_spec)
|
||||
weighed_host = sched._schedule(fake_context, request_spec, {})
|
||||
self.assertEqual('host1#lvm1', weighed_host.obj.host)
|
||||
|
||||
def test_max_attempts(self):
|
||||
self.flags(scheduler_max_attempts=4)
|
||||
|
||||
|
@ -27,7 +27,6 @@ from cinder import objects
|
||||
from cinder.scheduler import rpcapi as scheduler_rpcapi
|
||||
from cinder import test
|
||||
from cinder.tests.unit import fake_constants
|
||||
from cinder.tests.unit import fake_group
|
||||
from cinder.tests.unit import fake_volume
|
||||
|
||||
|
||||
@ -41,7 +40,6 @@ class SchedulerRPCAPITestCase(test.RPCAPITestCase):
|
||||
self.fake_volume = fake_volume.fake_volume_obj(
|
||||
self.context, expected_attrs=['metadata', 'admin_metadata',
|
||||
'glance_metadata'])
|
||||
self.fake_consistencygroup = fake_group
|
||||
self.fake_rs_obj = objects.RequestSpec.from_primitives({})
|
||||
self.fake_rs_dict = {'volume_id': self.volume_id}
|
||||
self.fake_fp_dict = {'availability_zone': 'fake_az'}
|
||||
@ -184,13 +182,6 @@ class SchedulerRPCAPITestCase(test.RPCAPITestCase):
|
||||
'capabilities': {},
|
||||
}])
|
||||
|
||||
def test_create_consistencygroup(self):
|
||||
self._test_rpc_api('create_consistencygroup',
|
||||
rpc_method='cast',
|
||||
group='group',
|
||||
request_spec_list=[self.fake_rs_dict],
|
||||
filter_properties_list=[self.fake_fp_dict])
|
||||
|
||||
def test_create_group(self):
|
||||
self._test_rpc_api('create_group',
|
||||
rpc_method='cast',
|
||||
|
@ -24,16 +24,12 @@ import mock
|
||||
from oslo_config import cfg
|
||||
|
||||
from cinder import context
|
||||
from cinder import db
|
||||
from cinder import exception
|
||||
from cinder.message import defined_messages
|
||||
from cinder import objects
|
||||
from cinder.objects import fields
|
||||
from cinder.scheduler import driver
|
||||
from cinder.scheduler import filter_scheduler
|
||||
from cinder.scheduler import manager
|
||||
from cinder import test
|
||||
from cinder.tests.unit.consistencygroup import fake_consistencygroup
|
||||
from cinder.tests.unit import fake_constants as fake
|
||||
from cinder.tests.unit import fake_volume
|
||||
from cinder.tests.unit import utils as tests_utils
|
||||
@ -341,43 +337,6 @@ class SchedulerManagerTestCase(test.TestCase):
|
||||
{'status': 'in-use'})
|
||||
self.manager.driver.find_retype_host = orig_retype
|
||||
|
||||
def test_create_consistencygroup_exceptions(self):
|
||||
with mock.patch.object(filter_scheduler.FilterScheduler,
|
||||
'schedule_create_consistencygroup') as mock_cg:
|
||||
original_driver = self.manager.driver
|
||||
consistencygroup_obj = \
|
||||
fake_consistencygroup.fake_consistencyobject_obj(self.context)
|
||||
self.manager.driver = filter_scheduler.FilterScheduler
|
||||
LOG = self.mock_object(manager, 'LOG')
|
||||
self.mock_object(db, 'consistencygroup_update')
|
||||
|
||||
ex = exception.CinderException('test')
|
||||
mock_cg.side_effect = ex
|
||||
group_id = fake.CONSISTENCY_GROUP_ID
|
||||
self.assertRaises(exception.CinderException,
|
||||
self.manager.create_consistencygroup,
|
||||
self.context,
|
||||
consistencygroup_obj)
|
||||
self.assertGreater(LOG.exception.call_count, 0)
|
||||
db.consistencygroup_update.assert_called_once_with(
|
||||
self.context, group_id, {'status': (
|
||||
fields.ConsistencyGroupStatus.ERROR)})
|
||||
|
||||
mock_cg.reset_mock()
|
||||
LOG.exception.reset_mock()
|
||||
db.consistencygroup_update.reset_mock()
|
||||
|
||||
mock_cg.side_effect = exception.NoValidBackend(
|
||||
reason="No weighed hosts available")
|
||||
self.manager.create_consistencygroup(
|
||||
self.context, consistencygroup_obj)
|
||||
self.assertGreater(LOG.error.call_count, 0)
|
||||
db.consistencygroup_update.assert_called_once_with(
|
||||
self.context, group_id, {'status': (
|
||||
fields.ConsistencyGroupStatus.ERROR)})
|
||||
|
||||
self.manager.driver = original_driver
|
||||
|
||||
def test_do_cleanup(self):
|
||||
vol = tests_utils.create_volume(self.context, status='creating')
|
||||
self.manager._do_cleanup(self.context, vol)
|
||||
|
@ -61,23 +61,6 @@ class VolumeRPCAPITestCase(test.RPCAPITestCase):
|
||||
snapshot = tests_utils.create_snapshot(self.context, vol['id'],
|
||||
**kwargs)
|
||||
|
||||
source_group = tests_utils.create_consistencygroup(
|
||||
self.context,
|
||||
availability_zone=CONF.storage_availability_zone,
|
||||
volume_type='type1,type2',
|
||||
host='fakehost@fakedrv#fakepool')
|
||||
|
||||
cgsnapshot = tests_utils.create_cgsnapshot(
|
||||
self.context,
|
||||
consistencygroup_id=source_group.id)
|
||||
|
||||
cg = tests_utils.create_consistencygroup(
|
||||
self.context,
|
||||
availability_zone=CONF.storage_availability_zone,
|
||||
volume_type='type1,type2',
|
||||
host='fakehost@fakedrv#fakepool',
|
||||
cgsnapshot_id=cgsnapshot.id)
|
||||
|
||||
generic_group = tests_utils.create_group(
|
||||
self.context,
|
||||
availability_zone=CONF.storage_availability_zone,
|
||||
@ -89,15 +72,10 @@ class VolumeRPCAPITestCase(test.RPCAPITestCase):
|
||||
group_id=generic_group.id,
|
||||
group_type_id=fake.GROUP_TYPE_ID)
|
||||
|
||||
cg = objects.ConsistencyGroup.get_by_id(self.context, cg.id)
|
||||
cgsnapshot = objects.CGSnapshot.get_by_id(self.context, cgsnapshot.id)
|
||||
self.fake_volume = jsonutils.to_primitive(volume)
|
||||
self.fake_volume_obj = fake_volume.fake_volume_obj(self.context, **vol)
|
||||
self.fake_snapshot = snapshot
|
||||
self.fake_reservations = ["RESERVATION"]
|
||||
self.fake_cg = cg
|
||||
self.fake_src_cg = source_group
|
||||
self.fake_cgsnap = cgsnapshot
|
||||
self.fake_backup_obj = fake_backup.fake_backup_obj(self.context)
|
||||
self.fake_group = generic_group
|
||||
self.fake_group_snapshot = group_snapshot
|
||||
@ -111,48 +89,12 @@ class VolumeRPCAPITestCase(test.RPCAPITestCase):
|
||||
self.fake_volume_obj.destroy()
|
||||
self.fake_group_snapshot.destroy()
|
||||
self.fake_group.destroy()
|
||||
self.fake_cgsnap.destroy()
|
||||
self.fake_cg.destroy()
|
||||
self.fake_src_cg.destroy()
|
||||
self.fake_backup_obj.destroy()
|
||||
|
||||
def _change_cluster_name(self, resource, cluster_name):
|
||||
resource.cluster_name = cluster_name
|
||||
resource.obj_reset_changes()
|
||||
|
||||
def test_create_consistencygroup(self):
|
||||
self._test_rpc_api('create_consistencygroup', rpc_method='cast',
|
||||
server='fakehost@fakedrv', group=self.fake_cg)
|
||||
|
||||
@ddt.data(None, 'my_cluster')
|
||||
def test_delete_consistencygroup(self, cluster_name):
|
||||
self._change_cluster_name(self.fake_cg, cluster_name)
|
||||
self._test_rpc_api('delete_consistencygroup', rpc_method='cast',
|
||||
server=cluster_name or self.fake_cg.host,
|
||||
group=self.fake_cg)
|
||||
|
||||
@ddt.data(None, 'my_cluster')
|
||||
def test_update_consistencygroup(self, cluster_name):
|
||||
self._change_cluster_name(self.fake_cg, cluster_name)
|
||||
self._test_rpc_api('update_consistencygroup', rpc_method='cast',
|
||||
server=cluster_name or self.fake_cg.host,
|
||||
group=self.fake_cg, add_volumes=[fake.VOLUME2_ID],
|
||||
remove_volumes=[fake.VOLUME3_ID])
|
||||
|
||||
def test_create_cgsnapshot(self):
|
||||
self._test_rpc_api('create_cgsnapshot', rpc_method='cast',
|
||||
server=self.fake_cgsnap.consistencygroup.host,
|
||||
cgsnapshot=self.fake_cgsnap)
|
||||
|
||||
@ddt.data(None, 'my_cluster')
|
||||
def test_delete_cgsnapshot(self, cluster_name):
|
||||
self._change_cluster_name(self.fake_cgsnap.consistencygroup,
|
||||
cluster_name)
|
||||
self._test_rpc_api(
|
||||
'delete_cgsnapshot', rpc_method='cast',
|
||||
server=cluster_name or self.fake_cgsnap.consistencygroup.host,
|
||||
cgsnapshot=self.fake_cgsnap)
|
||||
|
||||
def test_create_volume(self):
|
||||
self._test_rpc_api('create_volume',
|
||||
rpc_method='cast',
|
||||
@ -451,22 +393,6 @@ class VolumeRPCAPITestCase(test.RPCAPITestCase):
|
||||
fanout=True, server='fake_host', service=service,
|
||||
updates=mock.sentinel.updates)
|
||||
|
||||
def test_create_consistencygroup_from_src_cgsnapshot(self):
|
||||
self._test_rpc_api('create_consistencygroup_from_src',
|
||||
rpc_method='cast',
|
||||
server=self.fake_cg.host,
|
||||
group=self.fake_cg,
|
||||
cgsnapshot=self.fake_cgsnap,
|
||||
source_cg=None)
|
||||
|
||||
def test_create_consistencygroup_from_src_cg(self):
|
||||
self._test_rpc_api('create_consistencygroup_from_src',
|
||||
rpc_method='cast',
|
||||
server=self.fake_cg.host,
|
||||
group=self.fake_cg,
|
||||
cgsnapshot=None,
|
||||
source_cg=self.fake_src_cg)
|
||||
|
||||
def test_get_capabilities(self):
|
||||
self._test_rpc_api('get_capabilities',
|
||||
rpc_method='call',
|
||||
|
@ -2116,24 +2116,6 @@ class VolumeManager(manager.CleanableManager,
|
||||
context, snapshot, event_suffix,
|
||||
extra_usage_info=extra_usage_info, host=self.host)
|
||||
|
||||
def _notify_about_consistencygroup_usage(self,
|
||||
context,
|
||||
group,
|
||||
event_suffix,
|
||||
volumes=None,
|
||||
extra_usage_info=None):
|
||||
vol_utils.notify_about_consistencygroup_usage(
|
||||
context, group, event_suffix,
|
||||
extra_usage_info=extra_usage_info, host=self.host)
|
||||
|
||||
if not volumes:
|
||||
volumes = self.db.volume_get_all_by_group(context, group.id)
|
||||
if volumes:
|
||||
for volume in volumes:
|
||||
vol_utils.notify_about_volume_usage(
|
||||
context, volume, event_suffix,
|
||||
extra_usage_info=extra_usage_info, host=self.host)
|
||||
|
||||
def _notify_about_group_usage(self,
|
||||
context,
|
||||
group,
|
||||
@ -2153,25 +2135,6 @@ class VolumeManager(manager.CleanableManager,
|
||||
context, volume, event_suffix,
|
||||
extra_usage_info=extra_usage_info, host=self.host)
|
||||
|
||||
def _notify_about_cgsnapshot_usage(self,
|
||||
context,
|
||||
cgsnapshot,
|
||||
event_suffix,
|
||||
snapshots=None,
|
||||
extra_usage_info=None):
|
||||
vol_utils.notify_about_cgsnapshot_usage(
|
||||
context, cgsnapshot, event_suffix,
|
||||
extra_usage_info=extra_usage_info, host=self.host)
|
||||
|
||||
if not snapshots:
|
||||
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
|
||||
context, cgsnapshot.id)
|
||||
if snapshots:
|
||||
for snapshot in snapshots:
|
||||
vol_utils.notify_about_snapshot_usage(
|
||||
context, snapshot, event_suffix,
|
||||
extra_usage_info=extra_usage_info, host=self.host)
|
||||
|
||||
def _notify_about_group_snapshot_usage(self,
|
||||
context,
|
||||
group_snapshot,
|
||||
@ -2518,15 +2481,8 @@ class VolumeManager(manager.CleanableManager,
|
||||
"to driver error.")
|
||||
return driver_entries
|
||||
|
||||
def create_consistencygroup(self, context, group):
|
||||
"""Creates the consistency group."""
|
||||
return self._create_group(context, group, False)
|
||||
|
||||
def create_group(self, context, group):
|
||||
"""Creates the group."""
|
||||
return self._create_group(context, group)
|
||||
|
||||
def _create_group(self, context, group, is_generic_group=True):
|
||||
context = context.elevated()
|
||||
|
||||
# Make sure the host in the DB matches our own when clustered
|
||||
@ -2535,33 +2491,23 @@ class VolumeManager(manager.CleanableManager,
|
||||
status = fields.GroupStatus.AVAILABLE
|
||||
model_update = None
|
||||
|
||||
if is_generic_group:
|
||||
self._notify_about_group_usage(
|
||||
context, group, "create.start")
|
||||
else:
|
||||
self._notify_about_consistencygroup_usage(
|
||||
context, group, "create.start")
|
||||
self._notify_about_group_usage(context, group, "create.start")
|
||||
|
||||
try:
|
||||
utils.require_driver_initialized(self.driver)
|
||||
|
||||
LOG.info("Group %s: creating", group.name)
|
||||
if is_generic_group:
|
||||
try:
|
||||
model_update = self.driver.create_group(context,
|
||||
group)
|
||||
except NotImplementedError:
|
||||
cgsnap_type = group_types.get_default_cgsnapshot_type()
|
||||
if group.group_type_id != cgsnap_type['id']:
|
||||
model_update = self._create_group_generic(context,
|
||||
group)
|
||||
else:
|
||||
cg, __ = self._convert_group_to_cg(group, [])
|
||||
model_update = self.driver.create_consistencygroup(
|
||||
context, cg)
|
||||
else:
|
||||
model_update = self.driver.create_consistencygroup(context,
|
||||
group)
|
||||
|
||||
try:
|
||||
model_update = self.driver.create_group(context, group)
|
||||
except NotImplementedError:
|
||||
cgsnap_type = group_types.get_default_cgsnapshot_type()
|
||||
if group.group_type_id != cgsnap_type['id']:
|
||||
model_update = self._create_group_generic(context, group)
|
||||
else:
|
||||
cg, __ = self._convert_group_to_cg(group, [])
|
||||
model_update = self.driver.create_consistencygroup(
|
||||
context, cg)
|
||||
|
||||
if model_update:
|
||||
if (model_update['status'] ==
|
||||
@ -2586,153 +2532,13 @@ class VolumeManager(manager.CleanableManager,
|
||||
group.save()
|
||||
LOG.info("Group %s: created successfully", group.name)
|
||||
|
||||
if is_generic_group:
|
||||
self._notify_about_group_usage(
|
||||
context, group, "create.end")
|
||||
else:
|
||||
self._notify_about_consistencygroup_usage(
|
||||
context, group, "create.end")
|
||||
self._notify_about_group_usage(context, group, "create.end")
|
||||
|
||||
LOG.info("Create group completed successfully.",
|
||||
resource={'type': 'group',
|
||||
'id': group.id})
|
||||
return group
|
||||
|
||||
def create_consistencygroup_from_src(self, context, group,
|
||||
cgsnapshot=None, source_cg=None):
|
||||
"""Creates the consistency group from source.
|
||||
|
||||
The source can be a CG snapshot or a source CG.
|
||||
"""
|
||||
source_name = None
|
||||
snapshots = None
|
||||
source_vols = None
|
||||
try:
|
||||
volumes = self.db.volume_get_all_by_group(context, group.id)
|
||||
|
||||
if cgsnapshot:
|
||||
try:
|
||||
# Check if cgsnapshot still exists
|
||||
cgsnapshot = objects.CGSnapshot.get_by_id(
|
||||
context, cgsnapshot.id)
|
||||
except exception.CgSnapshotNotFound:
|
||||
LOG.error("Create consistency group "
|
||||
"from snapshot-%(snap)s failed: "
|
||||
"SnapshotNotFound.",
|
||||
{'snap': cgsnapshot.id},
|
||||
resource={'type': 'consistency_group',
|
||||
'id': group.id})
|
||||
raise
|
||||
|
||||
source_name = _("snapshot-%s") % cgsnapshot.id
|
||||
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
|
||||
context, cgsnapshot.id)
|
||||
for snap in snapshots:
|
||||
if (snap.status not in
|
||||
VALID_CREATE_CG_SRC_SNAP_STATUS):
|
||||
msg = (_("Cannot create consistency group "
|
||||
"%(group)s because snapshot %(snap)s is "
|
||||
"not in a valid state. Valid states are: "
|
||||
"%(valid)s.") %
|
||||
{'group': group.id,
|
||||
'snap': snap['id'],
|
||||
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
|
||||
raise exception.InvalidConsistencyGroup(reason=msg)
|
||||
|
||||
if source_cg:
|
||||
try:
|
||||
source_cg = objects.ConsistencyGroup.get_by_id(
|
||||
context, source_cg.id)
|
||||
except exception.ConsistencyGroupNotFound:
|
||||
LOG.error("Create consistency group "
|
||||
"from source cg-%(cg)s failed: "
|
||||
"ConsistencyGroupNotFound.",
|
||||
{'cg': source_cg.id},
|
||||
resource={'type': 'consistency_group',
|
||||
'id': group.id})
|
||||
raise
|
||||
|
||||
source_name = _("cg-%s") % source_cg.id
|
||||
source_vols = self.db.volume_get_all_by_group(
|
||||
context, source_cg.id)
|
||||
for source_vol in source_vols:
|
||||
if (source_vol['status'] not in
|
||||
VALID_CREATE_CG_SRC_CG_STATUS):
|
||||
msg = (_("Cannot create consistency group "
|
||||
"%(group)s because source volume "
|
||||
"%(source_vol)s is not in a valid "
|
||||
"state. Valid states are: "
|
||||
"%(valid)s.") %
|
||||
{'group': group.id,
|
||||
'source_vol': source_vol['id'],
|
||||
'valid': VALID_CREATE_CG_SRC_CG_STATUS})
|
||||
raise exception.InvalidConsistencyGroup(reason=msg)
|
||||
|
||||
# Sort source snapshots so that they are in the same order as their
|
||||
# corresponding target volumes.
|
||||
sorted_snapshots = None
|
||||
if cgsnapshot and snapshots:
|
||||
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
|
||||
|
||||
# Sort source volumes so that they are in the same order as their
|
||||
# corresponding target volumes.
|
||||
sorted_source_vols = None
|
||||
if source_cg and source_vols:
|
||||
sorted_source_vols = self._sort_source_vols(volumes,
|
||||
source_vols)
|
||||
|
||||
self._notify_about_consistencygroup_usage(
|
||||
context, group, "create.start")
|
||||
|
||||
utils.require_driver_initialized(self.driver)
|
||||
|
||||
model_update, volumes_model_update = (
|
||||
self.driver.create_consistencygroup_from_src(
|
||||
context, group, volumes, cgsnapshot,
|
||||
sorted_snapshots, source_cg, sorted_source_vols))
|
||||
|
||||
if volumes_model_update:
|
||||
for update in volumes_model_update:
|
||||
self.db.volume_update(context, update['id'], update)
|
||||
|
||||
if model_update:
|
||||
group.update(model_update)
|
||||
group.save()
|
||||
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
group.status = 'error'
|
||||
group.save()
|
||||
LOG.error("Create consistency group "
|
||||
"from source %(source)s failed.",
|
||||
{'source': source_name},
|
||||
resource={'type': 'consistency_group',
|
||||
'id': group.id})
|
||||
# Update volume status to 'error' as well.
|
||||
for vol in volumes:
|
||||
self.db.volume_update(
|
||||
context, vol['id'], {'status': 'error'})
|
||||
|
||||
now = timeutils.utcnow()
|
||||
status = 'available'
|
||||
for vol in volumes:
|
||||
update = {'status': status, 'created_at': now}
|
||||
self._update_volume_from_src(context, vol, update, group=group)
|
||||
self._update_allocated_capacity(vol)
|
||||
|
||||
group.status = status
|
||||
group.created_at = now
|
||||
group.save()
|
||||
|
||||
self._notify_about_consistencygroup_usage(
|
||||
context, group, "create.end")
|
||||
LOG.info("Create consistency group "
|
||||
"from source-%(source)s completed successfully.",
|
||||
{'source': source_name},
|
||||
resource={'type': 'consistency_group',
|
||||
'id': group.id})
|
||||
return group
|
||||
|
||||
def create_group_from_src(self, context, group,
|
||||
group_snapshot=None, source_group=None):
|
||||
"""Creates the group from source.
|
||||
@ -3066,125 +2872,6 @@ class VolumeManager(manager.CleanableManager,
|
||||
self.stats['pools'][pool] = dict(
|
||||
allocated_capacity_gb=vol['size'])
|
||||
|
||||
def delete_consistencygroup(self, context, group):
|
||||
"""Deletes consistency group and the volumes in the group."""
|
||||
context = context.elevated()
|
||||
project_id = group.project_id
|
||||
|
||||
if context.project_id != group.project_id:
|
||||
project_id = group.project_id
|
||||
else:
|
||||
project_id = context.project_id
|
||||
|
||||
volumes = objects.VolumeList.get_all_by_group(context, group.id)
|
||||
|
||||
for volume in volumes:
|
||||
if (volume.attach_status ==
|
||||
fields.VolumeAttachStatus.ATTACHED):
|
||||
# Volume is still attached, need to detach first
|
||||
raise exception.VolumeAttached(volume_id=volume.id)
|
||||
self._check_is_our_resource(volume)
|
||||
|
||||
self._notify_about_consistencygroup_usage(
|
||||
context, group, "delete.start")
|
||||
|
||||
volumes_model_update = None
|
||||
model_update = None
|
||||
try:
|
||||
utils.require_driver_initialized(self.driver)
|
||||
|
||||
model_update, volumes_model_update = (
|
||||
self.driver.delete_consistencygroup(context, group, volumes))
|
||||
|
||||
if volumes_model_update:
|
||||
for volume in volumes_model_update:
|
||||
update = {'status': volume['status']}
|
||||
self.db.volume_update(context, volume['id'],
|
||||
update)
|
||||
# If we failed to delete a volume, make sure the status
|
||||
# for the cg is set to error as well
|
||||
if (volume['status'] in ['error_deleting', 'error'] and
|
||||
model_update['status'] not in
|
||||
['error_deleting', 'error']):
|
||||
model_update['status'] = volume['status']
|
||||
|
||||
if model_update:
|
||||
if model_update['status'] in ['error_deleting', 'error']:
|
||||
msg = (_('Delete consistency group failed.'))
|
||||
LOG.error(msg,
|
||||
resource={'type': 'consistency_group',
|
||||
'id': group.id})
|
||||
raise exception.VolumeDriverException(message=msg)
|
||||
else:
|
||||
group.update(model_update)
|
||||
group.save()
|
||||
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
group.status = 'error'
|
||||
group.save()
|
||||
# Update volume status to 'error' if driver returns
|
||||
# None for volumes_model_update.
|
||||
if not volumes_model_update:
|
||||
for vol in volumes:
|
||||
vol.status = 'error'
|
||||
vol.save()
|
||||
|
||||
# Get reservations for group
|
||||
try:
|
||||
reserve_opts = {'consistencygroups': -1}
|
||||
cgreservations = CGQUOTAS.reserve(context,
|
||||
project_id=project_id,
|
||||
**reserve_opts)
|
||||
except Exception:
|
||||
cgreservations = None
|
||||
LOG.exception("Delete consistency group "
|
||||
"failed to update usages.",
|
||||
resource={'type': 'consistency_group',
|
||||
'id': group.id})
|
||||
|
||||
for volume in volumes:
|
||||
# Get reservations for volume
|
||||
try:
|
||||
reserve_opts = {'volumes': -1,
|
||||
'gigabytes': -volume.size}
|
||||
QUOTAS.add_volume_type_opts(context,
|
||||
reserve_opts,
|
||||
volume.volume_type_id)
|
||||
reservations = QUOTAS.reserve(context,
|
||||
project_id=project_id,
|
||||
**reserve_opts)
|
||||
except Exception:
|
||||
reservations = None
|
||||
LOG.exception("Delete consistency group "
|
||||
"failed to update usages.",
|
||||
resource={'type': 'consistency_group',
|
||||
'id': group.id})
|
||||
|
||||
# Delete glance metadata if it exists
|
||||
self.db.volume_glance_metadata_delete_by_volume(context, volume.id)
|
||||
|
||||
self.db.volume_destroy(context, volume.id)
|
||||
|
||||
# Commit the reservations
|
||||
if reservations:
|
||||
QUOTAS.commit(context, reservations, project_id=project_id)
|
||||
|
||||
self.stats['allocated_capacity_gb'] -= volume.size
|
||||
|
||||
if cgreservations:
|
||||
CGQUOTAS.commit(context, cgreservations,
|
||||
project_id=project_id)
|
||||
|
||||
group.destroy()
|
||||
self._notify_about_consistencygroup_usage(
|
||||
context, group, "delete.end", volumes)
|
||||
self.publish_service_capabilities(context)
|
||||
LOG.info("Delete consistency group "
|
||||
"completed successfully.",
|
||||
resource={'type': 'consistency_group',
|
||||
'id': group.id})
|
||||
|
||||
def delete_group(self, context, group):
|
||||
"""Deletes group and the volumes in the group."""
|
||||
context = context.elevated()
|
||||
@ -3393,143 +3080,6 @@ class VolumeManager(manager.CleanableManager,
|
||||
# anything in the backend storage.
|
||||
return None, None, None
|
||||
|
||||
def update_consistencygroup(self, context, group,
|
||||
add_volumes=None, remove_volumes=None):
|
||||
"""Updates consistency group.
|
||||
|
||||
Update consistency group by adding volumes to the group,
|
||||
or removing volumes from the group.
|
||||
"""
|
||||
|
||||
add_volumes_ref = []
|
||||
remove_volumes_ref = []
|
||||
add_volumes_list = []
|
||||
remove_volumes_list = []
|
||||
if add_volumes:
|
||||
add_volumes_list = add_volumes.split(',')
|
||||
if remove_volumes:
|
||||
remove_volumes_list = remove_volumes.split(',')
|
||||
for add_vol in add_volumes_list:
|
||||
try:
|
||||
add_vol_ovo = objects.Volume.get_by_id(context, add_vol)
|
||||
except exception.VolumeNotFound:
|
||||
LOG.error("Update consistency group "
|
||||
"failed to add volume-%(volume_id)s: "
|
||||
"VolumeNotFound.",
|
||||
{'volume_id': add_vol},
|
||||
resource={'type': 'consistency_group',
|
||||
'id': group.id})
|
||||
raise
|
||||
if add_vol_ovo.status not in VALID_ADD_VOL_TO_CG_STATUS:
|
||||
msg = (_("Cannot add volume %(volume_id)s to consistency "
|
||||
"group %(group_id)s because volume is in an invalid "
|
||||
"state: %(status)s. Valid states are: %(valid)s.") %
|
||||
{'volume_id': add_vol_ovo.id,
|
||||
'group_id': group.id,
|
||||
'status': add_vol_ovo.status,
|
||||
'valid': VALID_ADD_VOL_TO_CG_STATUS})
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
self._check_is_our_resource(add_vol_ovo)
|
||||
add_volumes_ref.append(add_vol_ovo)
|
||||
|
||||
for remove_vol in remove_volumes_list:
|
||||
try:
|
||||
remove_vol_ref = self.db.volume_get(context, remove_vol)
|
||||
except exception.VolumeNotFound:
|
||||
LOG.error("Update consistency group "
|
||||
"failed to remove volume-%(volume_id)s: "
|
||||
"VolumeNotFound.",
|
||||
{'volume_id': remove_vol},
|
||||
resource={'type': 'consistency_group',
|
||||
'id': group.id})
|
||||
raise
|
||||
if remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
|
||||
msg = (_("Cannot remove volume %(volume_id)s from consistency "
|
||||
"group %(group_id)s because volume is in an invalid "
|
||||
"state: %(status)s. Valid states are: %(valid)s.") %
|
||||
{'volume_id': remove_vol_ref['id'],
|
||||
'group_id': group.id,
|
||||
'status': remove_vol_ref['status'],
|
||||
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
remove_volumes_ref.append(remove_vol_ref)
|
||||
|
||||
self._notify_about_consistencygroup_usage(
|
||||
context, group, "update.start")
|
||||
|
||||
try:
|
||||
utils.require_driver_initialized(self.driver)
|
||||
|
||||
model_update, add_volumes_update, remove_volumes_update = (
|
||||
self.driver.update_consistencygroup(
|
||||
context, group,
|
||||
add_volumes=add_volumes_ref,
|
||||
remove_volumes=remove_volumes_ref))
|
||||
|
||||
if add_volumes_update:
|
||||
for update in add_volumes_update:
|
||||
self.db.volume_update(context, update['id'], update)
|
||||
|
||||
if remove_volumes_update:
|
||||
for update in remove_volumes_update:
|
||||
self.db.volume_update(context, update['id'], update)
|
||||
|
||||
if model_update:
|
||||
if model_update['status'] in (
|
||||
[fields.ConsistencyGroupStatus.ERROR]):
|
||||
msg = (_('Error occurred when updating consistency group '
|
||||
'%s.') % group.id)
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeDriverException(message=msg)
|
||||
group.update(model_update)
|
||||
group.save()
|
||||
|
||||
except exception.VolumeDriverException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error("Error occurred in the volume driver when "
|
||||
"updating consistency group %(group_id)s.",
|
||||
{'group_id': group.id})
|
||||
group.status = 'error'
|
||||
group.save()
|
||||
for add_vol in add_volumes_ref:
|
||||
self.db.volume_update(context, add_vol['id'],
|
||||
{'status': 'error'})
|
||||
for rem_vol in remove_volumes_ref:
|
||||
self.db.volume_update(context, rem_vol['id'],
|
||||
{'status': 'error'})
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error("Error occurred when updating consistency "
|
||||
"group %(group_id)s.",
|
||||
{'group_id': group.id})
|
||||
group.status = 'error'
|
||||
group.save()
|
||||
for add_vol in add_volumes_ref:
|
||||
self.db.volume_update(context, add_vol['id'],
|
||||
{'status': 'error'})
|
||||
for rem_vol in remove_volumes_ref:
|
||||
self.db.volume_update(context, rem_vol['id'],
|
||||
{'status': 'error'})
|
||||
|
||||
now = timeutils.utcnow()
|
||||
group.status = 'available'
|
||||
group.update_at = now
|
||||
group.save()
|
||||
for add_vol in add_volumes_ref:
|
||||
self.db.volume_update(context, add_vol['id'],
|
||||
{'consistencygroup_id': group.id,
|
||||
'updated_at': now})
|
||||
for rem_vol in remove_volumes_ref:
|
||||
self.db.volume_update(context, rem_vol['id'],
|
||||
{'consistencygroup_id': None,
|
||||
'updated_at': now})
|
||||
|
||||
self._notify_about_consistencygroup_usage(
|
||||
context, group, "update.end")
|
||||
LOG.info("Update consistency group completed successfully.",
|
||||
resource={'type': 'consistency_group',
|
||||
'id': group.id})
|
||||
|
||||
def update_group(self, context, group,
|
||||
add_volumes=None, remove_volumes=None):
|
||||
"""Updates group.
|
||||
@ -3682,116 +3232,6 @@ class VolumeManager(manager.CleanableManager,
|
||||
resource={'type': 'group',
|
||||
'id': group.id})
|
||||
|
||||
def create_cgsnapshot(self, context, cgsnapshot):
    """Create a consistency-group snapshot through the volume driver.

    Snapshots every volume in the consistency group, persists any model
    updates the driver returns for the cgsnapshot and its member
    snapshots, copies glance metadata for snapshots of bootable volumes,
    and emits "create.start"/"create.end" usage notifications.

    :param context: request context; elevated for DB work, while the
        original (caller) context is handed to the driver.
    :param cgsnapshot: CGSnapshot object to create.
    :returns: the cgsnapshot, status 'available' on success.
    :raises VolumeDriverException: driver reported 'error' status, or
        the driver itself was not initialized.
    :raises MetadataCopyFailure: copying glance metadata to a snapshot
        of a bootable volume failed.
    """
    caller_context = context
    context = context.elevated()

    LOG.info("Cgsnapshot %s: creating.", cgsnapshot.id)

    snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
        context, cgsnapshot.id)

    self._notify_about_cgsnapshot_usage(
        context, cgsnapshot, "create.start")

    snapshots_model_update = None
    model_update = None
    try:
        utils.require_driver_initialized(self.driver)

        LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
                  {'cgsnap_id': cgsnapshot.id})

        # Pass context so that drivers that want to use it, can,
        # but it is not a requirement for all drivers.
        cgsnapshot.context = caller_context
        for snapshot in snapshots:
            snapshot.context = caller_context

        model_update, snapshots_model_update = (
            self.driver.create_cgsnapshot(context, cgsnapshot,
                                          snapshots))

        if snapshots_model_update:
            for snap_model in snapshots_model_update:
                # Update db for snapshot.
                # NOTE(xyang): snapshots is a list of snapshot objects.
                # snapshots_model_update should be a list of dicts.
                self.db.snapshot_update(context,
                                        snap_model['id'],
                                        snap_model)

                # Propagate a per-snapshot error into the cgsnapshot's
                # own model update so the failure is not masked.
                if (snap_model['status'] in [
                    fields.SnapshotStatus.ERROR_DELETING,
                    fields.SnapshotStatus.ERROR] and
                        model_update['status'] not in
                        ['error_deleting', 'error']):
                    model_update['status'] = snap_model['status']

        if model_update:
            if model_update['status'] == 'error':
                msg = (_('Error occurred when creating cgsnapshot '
                         '%s.') % cgsnapshot.id)
                LOG.error(msg)
                raise exception.VolumeDriverException(message=msg)

            cgsnapshot.update(model_update)
            cgsnapshot.save()

    except exception.CinderException:
        with excutils.save_and_reraise_exception():
            cgsnapshot.status = 'error'
            cgsnapshot.save()
            # Update snapshot status to 'error' if driver returns
            # None for snapshots_model_update.
            if not snapshots_model_update:
                for snapshot in snapshots:
                    snapshot.status = fields.SnapshotStatus.ERROR
                    snapshot.save()

    for snapshot in snapshots:
        volume_id = snapshot['volume_id']
        snapshot_id = snapshot['id']
        vol_ref = self.db.volume_get(context, volume_id)
        if vol_ref.bootable:
            try:
                self.db.volume_glance_metadata_copy_to_snapshot(
                    context, snapshot_id, volume_id)
            except exception.GlanceMetadataNotFound:
                # If volume is not created from image, No glance metadata
                # would be available for that volume in
                # volume glance metadata table
                pass
            except exception.CinderException as ex:
                LOG.error("Failed updating %(snapshot_id)s"
                          " metadata using the provided volumes"
                          " %(volume_id)s metadata",
                          {'volume_id': volume_id,
                           'snapshot_id': snapshot_id})

                # TODO(thangp): Switch over to use snapshot.update()
                # after cgsnapshot-objects bugs are fixed
                self.db.snapshot_update(
                    context, snapshot_id, {
                        'status': fields.SnapshotStatus.ERROR})
                raise exception.MetadataCopyFailure(
                    reason=six.text_type(ex))

        self.db.snapshot_update(context,
                                snapshot['id'],
                                {'status': fields.SnapshotStatus.AVAILABLE,
                                 'progress': '100%'})

    cgsnapshot.status = 'available'
    cgsnapshot.save()

    LOG.info("cgsnapshot %s: created successfully",
             cgsnapshot.id)
    self._notify_about_cgsnapshot_usage(
        context, cgsnapshot, "create.end")
    return cgsnapshot
|
||||
|
||||
def create_group_snapshot(self, context, group_snapshot):
|
||||
"""Creates the group_snapshot."""
|
||||
caller_context = context
|
||||
@ -3952,114 +3392,6 @@ class VolumeManager(manager.CleanableManager,
|
||||
|
||||
return model_update, snapshot_model_updates
|
||||
|
||||
def delete_cgsnapshot(self, context, cgsnapshot):
    """Delete a consistency-group snapshot through the volume driver.

    Asks the driver to delete the cgsnapshot and its member snapshots,
    persists any model updates the driver returns, releases quota
    (snapshots and, unless no_snapshot_gb_quota is set, gigabytes) for
    each destroyed snapshot, removes related glance metadata, destroys
    the cgsnapshot record, and emits "delete.start"/"delete.end" usage
    notifications.

    :param context: request context; elevated for DB work, while the
        original (caller) context is handed to the driver.
    :param cgsnapshot: CGSnapshot object to delete.
    :raises VolumeDriverException: driver reported 'error' or
        'error_deleting' status, or the driver was not initialized.
    """
    caller_context = context
    context = context.elevated()
    project_id = cgsnapshot.project_id

    LOG.info("cgsnapshot %s: deleting", cgsnapshot.id)

    snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
        context, cgsnapshot.id)

    self._notify_about_cgsnapshot_usage(
        context, cgsnapshot, "delete.start")

    snapshots_model_update = None
    model_update = None
    try:
        utils.require_driver_initialized(self.driver)

        LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
                  {'cgsnap_id': cgsnapshot.id})

        # Pass context so that drivers that want to use it, can,
        # but it is not a requirement for all drivers.
        cgsnapshot.context = caller_context
        for snapshot in snapshots:
            snapshot.context = caller_context

        model_update, snapshots_model_update = (
            self.driver.delete_cgsnapshot(context, cgsnapshot,
                                          snapshots))

        if snapshots_model_update:
            for snap_model in snapshots_model_update:
                # NOTE(xyang): snapshots is a list of snapshot objects.
                # snapshots_model_update should be a list of dicts.
                snap = next((item for item in snapshots if
                             item.id == snap_model['id']), None)
                if snap:
                    snap.status = snap_model['status']
                    snap.save()

                # Propagate a per-snapshot error into the cgsnapshot's
                # own model update so the failure is not masked.
                if (snap_model['status'] in
                        [fields.SnapshotStatus.ERROR_DELETING,
                         fields.SnapshotStatus.ERROR] and
                        model_update['status'] not in
                        ['error_deleting', 'error']):
                    model_update['status'] = snap_model['status']

        if model_update:
            if model_update['status'] in ['error_deleting', 'error']:
                msg = (_('Error occurred when deleting cgsnapshot '
                         '%s.') % cgsnapshot.id)
                LOG.error(msg)
                raise exception.VolumeDriverException(message=msg)
            else:
                cgsnapshot.update(model_update)
                cgsnapshot.save()

    except exception.CinderException:
        with excutils.save_and_reraise_exception():
            cgsnapshot.status = 'error'
            cgsnapshot.save()
            # Update snapshot status to 'error' if driver returns
            # None for snapshots_model_update.
            if not snapshots_model_update:
                for snapshot in snapshots:
                    snapshot.status = fields.SnapshotStatus.ERROR
                    snapshot.save()

    for snapshot in snapshots:
        # Get reservations
        try:
            if CONF.no_snapshot_gb_quota:
                reserve_opts = {'snapshots': -1}
            else:
                reserve_opts = {
                    'snapshots': -1,
                    'gigabytes': -snapshot['volume_size'],
                }
            volume_ref = self.db.volume_get(context, snapshot['volume_id'])
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume_ref.get('volume_type_id'))
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)

        except Exception:
            # Best-effort: quota bookkeeping failure must not block the
            # snapshot deletion itself.
            reservations = None
            LOG.exception("Failed to update usages deleting snapshot")

        self.db.volume_glance_metadata_delete_by_snapshot(context,
                                                          snapshot['id'])

        # TODO(thangp): Switch over to use snapshot.destroy()
        # after cgsnapshot-objects bugs are fixed
        self.db.snapshot_destroy(context, snapshot['id'])

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)

    cgsnapshot.destroy()
    LOG.info("cgsnapshot %s: deleted successfully", cgsnapshot.id)
    self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end",
                                        snapshots)
|
||||
|
||||
def delete_group_snapshot(self, context, group_snapshot):
|
||||
"""Deletes group_snapshot."""
|
||||
caller_context = context
|
||||
|
@ -124,9 +124,12 @@ class VolumeAPI(rpc.RPCAPI):
|
||||
3.9 - Adds new attach/detach methods
|
||||
3.10 - Returning objects instead of raw dictionaries in
|
||||
get_manageable_volumes & get_manageable_snapshots
|
||||
3.11 - Removes create_consistencygroup, delete_consistencygroup,
|
||||
create_cgsnapshot, delete_cgsnapshot, update_consistencygroup,
|
||||
and create_consistencygroup_from_src.
|
||||
"""
|
||||
|
||||
RPC_API_VERSION = '3.10'
|
||||
RPC_API_VERSION = '3.11'
|
||||
RPC_DEFAULT_VERSION = '3.0'
|
||||
TOPIC = constants.VOLUME_TOPIC
|
||||
BINARY = 'cinder-volume'
|
||||
@ -148,38 +151,6 @@ class VolumeAPI(rpc.RPCAPI):
|
||||
|
||||
return super(VolumeAPI, self)._get_cctxt(version=version, **kwargs)
|
||||
|
||||
def create_consistencygroup(self, ctxt, group):
    """Asynchronously ask the service hosting *group* to create it."""
    client = self._get_cctxt(group.service_topic_queue)
    client.cast(ctxt, 'create_consistencygroup', group=group)
|
||||
|
||||
def delete_consistencygroup(self, ctxt, group):
    """Asynchronously ask the service hosting *group* to delete it."""
    client = self._get_cctxt(group.service_topic_queue)
    client.cast(ctxt, 'delete_consistencygroup', group=group)
|
||||
|
||||
def update_consistencygroup(self, ctxt, group, add_volumes=None,
                            remove_volumes=None):
    """Asynchronously add/remove member volumes of *group*.

    *add_volumes*/*remove_volumes* are forwarded unchanged to the
    volume service hosting the group.
    """
    client = self._get_cctxt(group.service_topic_queue)
    client.cast(ctxt, 'update_consistencygroup', group=group,
                add_volumes=add_volumes, remove_volumes=remove_volumes)
|
||||
|
||||
def create_consistencygroup_from_src(self, ctxt, group, cgsnapshot=None,
                                     source_cg=None):
    """Asynchronously create *group* from a cgsnapshot or a source CG.

    Exactly which source is used is decided by the receiving service;
    this wrapper only forwards both optional references.
    """
    client = self._get_cctxt(group.service_topic_queue)
    client.cast(ctxt, 'create_consistencygroup_from_src', group=group,
                cgsnapshot=cgsnapshot, source_cg=source_cg)
|
||||
|
||||
def create_cgsnapshot(self, ctxt, cgsnapshot):
    """Asynchronously ask the hosting service to create *cgsnapshot*."""
    client = self._get_cctxt(cgsnapshot.service_topic_queue)
    client.cast(ctxt, 'create_cgsnapshot', cgsnapshot=cgsnapshot)
|
||||
|
||||
def delete_cgsnapshot(self, ctxt, cgsnapshot):
    """Asynchronously ask the hosting service to delete *cgsnapshot*."""
    client = self._get_cctxt(cgsnapshot.service_topic_queue)
    client.cast(ctxt, 'delete_cgsnapshot', cgsnapshot=cgsnapshot)
|
||||
|
||||
def create_volume(self, ctxt, volume, request_spec, filter_properties,
|
||||
allow_reschedule=True):
|
||||
cctxt = self._get_cctxt(volume.service_topic_queue)
|
||||
|
Loading…
Reference in New Issue
Block a user