Add generic volume groups

This is the second patch that implements the generic-volume-group
blueprint. It adds the groups table and introduces create/delete/
update/list/show APIs for groups.

It depends on the first patch which adds group types and group specs:
    https://review.openstack.org/#/c/320165/

Client side patch is here:
    https://review.openstack.org/#/c/322627/

Current microversion is 3.13. The following CLI's are supported:
cinder --os-volume-api-version 3.13 group-create --name my_group
    <group type uuid> <volume type uuid>
cinder --os-volume-api-version 3.13 group-list
cinder --os-volume-api-version 3.13 create --group-id <group uuid>
    --volume-type <volume type uuid> <size>
cinder --os-volume-api-version 3.13 group-update <group uuid>
    --name new_name --description new_description
    --add-volumes <uuid of volume to add>
    --remove-volumes <uuid of volume to remove>
cinder --os-volume-api-version 3.13 group-show <group uuid>
cinder --os-volume-api-version 3.13 group-delete
    --delete-volumes <group uuid>

APIImpact
DocImpact
Change-Id: I35157439071786872bc9976741c4ef75698f7cb7
Partial-Implements: blueprint generic-volume-group
This commit is contained in:
xing-yang 2016-05-15 20:40:40 -04:00
parent 9b9ea77c65
commit 8c74c74695
52 changed files with 4443 additions and 81 deletions

View File

@ -60,6 +60,7 @@ REST_API_VERSION_HISTORY = """
* 3.10 - Add group_id filter to list/detail volumes in _get_volumes.
* 3.11 - Add group types and group specs API.
* 3.12 - Add volumes summary API.
* 3.13 - Add generic volume groups API.
"""
@ -68,7 +69,7 @@ REST_API_VERSION_HISTORY = """
# minimum version of the API supported.
# Explicitly using /v1 or /v2 enpoints will still work
_MIN_API_VERSION = "3.0"
_MAX_API_VERSION = "3.12"
_MAX_API_VERSION = "3.13"
_LEGACY_API_VERSION1 = "1.0"
_LEGACY_API_VERSION2 = "2.0"

View File

@ -178,3 +178,7 @@ user documentation.
3.12
----
Added volumes/summary API.
3.13
----
Added create/delete/update/list/show APIs for generic volume groups.

232
cinder/api/v3/groups.py Normal file
View File

@ -0,0 +1,232 @@
# Copyright (c) 2016 EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The groups controller."""
from oslo_log import log as logging
from oslo_utils import strutils
import webob
from webob import exc
from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api.v3.views import groups as views_groups
from cinder import exception
from cinder import group as group_api
from cinder.i18n import _, _LI
LOG = logging.getLogger(__name__)
GROUP_API_VERSION = '3.13'
class GroupsController(wsgi.Controller):
    """The groups API controller for the OpenStack API."""

    _view_builder_class = views_groups.ViewBuilder

    def __init__(self):
        self.group_api = group_api.API()
        super(GroupsController, self).__init__()

    @wsgi.Controller.api_version(GROUP_API_VERSION)
    def show(self, req, id):
        """Return data about the given group."""
        LOG.debug('show called for member %s', id)
        context = req.environ['cinder.context']

        # Not found exception will be handled at the wsgi level
        group = self.group_api.get(
            context,
            group_id=id)

        return self._view_builder.detail(req, group)

    @wsgi.Controller.api_version(GROUP_API_VERSION)
    @wsgi.action("delete")
    def delete_group(self, req, id, body):
        """WSGI 'delete' action entry point; delegates to _delete."""
        return self._delete(req, id, body)

    def _delete(self, req, id, body):
        """Delete a group.

        The optional 'delete-volumes' flag in the request body controls
        whether volumes in the group are deleted along with it.
        """
        LOG.debug('delete called for group %s', id)
        context = req.environ['cinder.context']
        del_vol = False
        if body:
            if not self.is_valid_body(body, 'delete'):
                msg = _("Missing required element 'delete' in "
                        "request body.")
                raise exc.HTTPBadRequest(explanation=msg)

            grp_body = body['delete']
            # Keep the raw user-supplied value so a rejected flag can be
            # echoed back accurately. (Previously the message interpolated
            # 'del_vol', which was still the default False when
            # bool_from_string raised, so the error always reported
            # "Invalid value 'False'".)
            raw_del_vol = grp_body.get('delete-volumes', False)
            try:
                del_vol = strutils.bool_from_string(raw_del_vol,
                                                    strict=True)
            except ValueError:
                msg = (_("Invalid value '%s' for delete-volumes flag.")
                       % raw_del_vol)
                raise exc.HTTPBadRequest(explanation=msg)

        LOG.info(_LI('Delete group with id: %s'), id,
                 context=context)

        try:
            group = self.group_api.get(context, id)
            self.group_api.delete(context, group, del_vol)
        except exception.GroupNotFound:
            # Not found exception will be handled at the wsgi level
            raise
        except exception.InvalidGroup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)

        return webob.Response(status_int=202)

    @wsgi.Controller.api_version(GROUP_API_VERSION)
    def index(self, req):
        """Returns a summary list of groups."""
        return self._get_groups(req, is_detail=False)

    @wsgi.Controller.api_version(GROUP_API_VERSION)
    def detail(self, req):
        """Returns a detailed list of groups."""
        return self._get_groups(req, is_detail=True)

    def _get_groups(self, req, is_detail):
        """Returns a list of groups through view builder."""
        context = req.environ['cinder.context']
        filters = req.params.copy()
        # Pagination/sort params are popped out of 'filters' by the helpers.
        marker, limit, offset = common.get_pagination_params(filters)
        sort_keys, sort_dirs = common.get_sort_params(filters)

        groups = self.group_api.get_all(
            context, filters=filters, marker=marker, limit=limit,
            offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)

        if is_detail:
            groups = self._view_builder.detail_list(
                req, groups)
        else:
            groups = self._view_builder.summary_list(
                req, groups)

        return groups

    @wsgi.Controller.api_version(GROUP_API_VERSION)
    @wsgi.response(202)
    def create(self, req, body):
        """Create a new group.

        Requires 'group_type' and 'volume_types' in the request body.
        """
        LOG.debug('Creating new group %s', body)
        self.assert_valid_body(body, 'group')

        context = req.environ['cinder.context']
        group = body['group']
        self.validate_name_and_description(group)
        name = group.get('name')
        description = group.get('description')
        group_type = group.get('group_type')
        if not group_type:
            msg = _("group_type must be provided to create "
                    "group %(name)s.") % {'name': name}
            raise exc.HTTPBadRequest(explanation=msg)
        volume_types = group.get('volume_types')
        if not volume_types:
            msg = _("volume_types must be provided to create "
                    "group %(name)s.") % {'name': name}
            raise exc.HTTPBadRequest(explanation=msg)
        availability_zone = group.get('availability_zone')

        LOG.info(_LI("Creating group %(name)s."),
                 {'name': name},
                 context=context)

        try:
            new_group = self.group_api.create(
                context, name, description, group_type, volume_types,
                availability_zone=availability_zone)
        except (exception.Invalid, exception.ObjectActionError) as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.NotFound:
            # Not found exception will be handled at the wsgi level
            raise

        retval = self._view_builder.summary(req, new_group)
        return retval

    @wsgi.Controller.api_version(GROUP_API_VERSION)
    def update(self, req, id, body):
        """Update the group.

        Expected format of the input parameter 'body':

        .. code-block:: json

            {
                "group":
                {
                    "name": "my_group",
                    "description": "My group",
                    "add_volumes": "volume-uuid-1,volume-uuid-2,...",
                    "remove_volumes": "volume-uuid-8,volume-uuid-9,..."
                }
            }
        """
        LOG.debug('Update called for group %s.', id)

        if not body:
            msg = _("Missing request body.")
            raise exc.HTTPBadRequest(explanation=msg)

        self.assert_valid_body(body, 'group')
        context = req.environ['cinder.context']

        group = body.get('group')
        self.validate_name_and_description(group)
        name = group.get('name')
        description = group.get('description')
        add_volumes = group.get('add_volumes')
        remove_volumes = group.get('remove_volumes')

        # Allow name or description to be changed to an empty string ''.
        if (name is None and description is None and not add_volumes
                and not remove_volumes):
            msg = _("Name, description, add_volumes, and remove_volumes "
                    "can not be all empty in the request body.")
            raise exc.HTTPBadRequest(explanation=msg)

        LOG.info(_LI("Updating group %(id)s with name %(name)s "
                     "description: %(description)s add_volumes: "
                     "%(add_volumes)s remove_volumes: %(remove_volumes)s."),
                 {'id': id, 'name': name,
                  'description': description,
                  'add_volumes': add_volumes,
                  'remove_volumes': remove_volumes},
                 context=context)

        try:
            group = self.group_api.get(context, id)
            self.group_api.update(
                context, group, name, description,
                add_volumes, remove_volumes)
        except exception.GroupNotFound:
            # Not found exception will be handled at the wsgi level
            raise
        except exception.InvalidGroup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)

        return webob.Response(status_int=202)
def create_resource():
    """Build the WSGI resource wrapping the groups controller."""
    controller = GroupsController()
    return wsgi.Resource(controller)

View File

@ -31,6 +31,7 @@ from cinder.api.v3 import clusters
from cinder.api.v3 import consistencygroups
from cinder.api.v3 import group_specs
from cinder.api.v3 import group_types
from cinder.api.v3 import groups
from cinder.api.v3 import messages
from cinder.api.v3 import snapshot_manage
from cinder.api.v3 import volume_manage
@ -82,6 +83,17 @@ class APIRouter(cinder.api.openstack.APIRouter):
parent_resource=dict(member_name='group_type',
collection_name='group_types'))
self.resources['groups'] = groups.create_resource()
mapper.resource("group", "groups",
controller=self.resources['groups'],
collection={'detail': 'GET'},
member={'action': 'POST'})
mapper.connect("groups",
"/{project_id}/groups/{id}/action",
controller=self.resources["groups"],
action="action",
conditions={"action": ["POST"]})
self.resources['snapshots'] = snapshots.create_resource(ext_mgr)
mapper.resource("snapshot", "snapshots",
controller=self.resources['snapshots'],

View File

@ -0,0 +1,72 @@
# Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
class ViewBuilder(common.ViewBuilder):
    """Model group API responses as a python dictionary."""

    _collection_name = "groups"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary_list(self, request, groups):
        """Show a list of groups without many details."""
        return self._list_view(self.summary, request, groups)

    def detail_list(self, request, groups):
        """Detailed view of a list of groups."""
        return self._list_view(self.detail, request, groups)

    def summary(self, request, group):
        """Generic, non-detailed view of a group."""
        return {'group': {'id': group.id, 'name': group.name}}

    def detail(self, request, group):
        """Detailed view of a single group."""
        group_ref = {
            'id': group.id,
            'status': group.status,
            'availability_zone': group.availability_zone,
            'created_at': group.created_at,
            'name': group.name,
            'description': group.description,
            'group_type': group.group_type_id,
            'volume_types': [v_type.id for v_type in group.volume_types],
        }
        return {'group': group_ref}

    def _list_view(self, func, request, groups):
        """Provide a view for a list of groups."""
        entries = [func(request, grp)['group'] for grp in groups]
        response = dict(groups=entries)
        links = self._get_collection_links(request,
                                           groups,
                                           self._collection_name)
        if links:
            response['group_links'] = links
        return response

View File

@ -1,3 +1,6 @@
# Copyright 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -10,9 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api.v2.views import volumes as views_v2
class ViewBuilder(object):
"""Model a server API response as a python dictionary."""
class ViewBuilder(views_v2.ViewBuilder):
"""Model a volumes API V3 response as a python dictionary."""
def quick_summary(self, volume_count, volume_size):
"""Number of volumes and size of volumes."""
@ -22,3 +27,14 @@ class ViewBuilder(object):
'total_size': volume_size
},
}
def detail(self, request, volume):
    """Detailed view of a single volume."""
    volume_ref = super(ViewBuilder, self).detail(request, volume)
    # group_id is only exposed to microversion 3.13 and later callers.
    if request.api_version_request.matches("3.13", None):
        volume_ref['volume']['group_id'] = volume.get('group_id')
    return volume_ref

View File

@ -13,11 +13,21 @@
"""The volumes V3 api."""
from oslo_log import log as logging
from oslo_utils import uuidutils
from webob import exc
from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api.v2 import volumes as volumes_v2
from cinder.api.v3.views import volumes as volume_views_v3
from cinder import exception
from cinder import group as group_api
from cinder.i18n import _, _LI
from cinder import utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
SUMMARY_BASE_MICRO_VERSION = '3.12'
@ -25,6 +35,12 @@ SUMMARY_BASE_MICRO_VERSION = '3.12'
class VolumeController(volumes_v2.VolumeController):
"""The Volumes API controller for the OpenStack API V3."""
_view_builder_class = volume_views_v3.ViewBuilder
def __init__(self, ext_mgr):
    """Initialize the V3 volume controller with a generic-group API handle."""
    self.group_api = group_api.API()
    super(VolumeController, self).__init__(ext_mgr)
def _get_volumes(self, req, is_detail):
"""Returns a list of volumes, transformed through view builder."""
@ -88,6 +104,144 @@ class VolumeController(volumes_v2.VolumeController):
volumes = self.volume_api.get_volume_summary(context, filters=filters)
return view_builder_v3.quick_summary(volumes[0], int(volumes[1]))
@wsgi.response(202)
def create(self, req, body):
    """Creates a new volume.

    Overrides the v2 create to add generic-volume-group support: a
    'group_id' in the body is honoured from microversion 3.13 on and
    silently stripped for older requests.

    :param req: the request
    :param body: the request body
    :returns: dict -- the new volume dictionary
    :raises: HTTPNotFound, HTTPBadRequest
    """
    self.assert_valid_body(body, 'volume')

    LOG.debug('Create volume request body: %s', body)
    context = req.environ['cinder.context']

    req_version = req.api_version_request
    # Remove group_id from body if max version is less than 3.13.
    if req_version.matches(None, "3.12"):
        # NOTE(xyang): The group_id is from a group created with a
        # group_type. So with this group_id, we've got a group_type
        # for this volume. Also if group_id is passed in, that means
        # we already know which backend is hosting the group and the
        # volume will be created on the same backend as well. So it
        # won't go through the scheduler again if a group_id is
        # passed in.
        try:
            body.get('volume', {}).pop('group_id', None)
        except AttributeError:
            # 'volume' was present but not a dict.
            msg = (_("Invalid body provided for creating volume. "
                     "Request API version: %s.") % req_version)
            raise exc.HTTPBadRequest(explanation=msg)

    volume = body['volume']
    kwargs = {}
    self.validate_name_and_description(volume)

    # NOTE(thingee): v2 API allows name instead of display_name
    if 'name' in volume:
        volume['display_name'] = volume.pop('name')

    # NOTE(thingee): v2 API allows description instead of
    # display_description
    if 'description' in volume:
        volume['display_description'] = volume.pop('description')

    if 'image_id' in volume:
        volume['imageRef'] = volume.pop('image_id')

    # Volume type may be given by name or by UUID.
    req_volume_type = volume.get('volume_type', None)
    if req_volume_type:
        # Not found exception will be handled at the wsgi level
        if not uuidutils.is_uuid_like(req_volume_type):
            kwargs['volume_type'] = (
                volume_types.get_volume_type_by_name(
                    context, req_volume_type))
        else:
            kwargs['volume_type'] = volume_types.get_volume_type(
                context, req_volume_type)

    kwargs['metadata'] = volume.get('metadata', None)

    snapshot_id = volume.get('snapshot_id')
    if snapshot_id is not None:
        # Not found exception will be handled at the wsgi level
        kwargs['snapshot'] = self.volume_api.get_snapshot(context,
                                                          snapshot_id)
    else:
        kwargs['snapshot'] = None

    source_volid = volume.get('source_volid')
    if source_volid is not None:
        # Not found exception will be handled at the wsgi level
        kwargs['source_volume'] = (
            self.volume_api.get_volume(context,
                                       source_volid))
    else:
        kwargs['source_volume'] = None

    source_replica = volume.get('source_replica')
    if source_replica is not None:
        # Not found exception will be handled at the wsgi level
        src_vol = self.volume_api.get_volume(context,
                                             source_replica)
        if src_vol['replication_status'] == 'disabled':
            explanation = _('source volume id:%s is not'
                            ' replicated') % source_replica
            raise exc.HTTPBadRequest(explanation=explanation)
        kwargs['source_replica'] = src_vol
    else:
        kwargs['source_replica'] = None

    consistencygroup_id = volume.get('consistencygroup_id')
    if consistencygroup_id is not None:
        # Not found exception will be handled at the wsgi level
        kwargs['consistencygroup'] = (
            self.consistencygroup_api.get(context,
                                          consistencygroup_id))
    else:
        kwargs['consistencygroup'] = None

    # Get group_id if volume is in a group.
    group_id = volume.get('group_id')
    if group_id is not None:
        try:
            kwargs['group'] = self.group_api.get(context, group_id)
        except exception.GroupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)

    # Infer the size from the source object when not given explicitly.
    size = volume.get('size', None)
    if size is None and kwargs['snapshot'] is not None:
        size = kwargs['snapshot']['volume_size']
    elif size is None and kwargs['source_volume'] is not None:
        size = kwargs['source_volume']['size']
    elif size is None and kwargs['source_replica'] is not None:
        size = kwargs['source_replica']['size']

    LOG.info(_LI("Create volume of %s GB"), size)

    if self.ext_mgr.is_loaded('os-image-create'):
        image_ref = volume.get('imageRef')
        if image_ref is not None:
            image_uuid = self._image_uuid_from_ref(image_ref, context)
            kwargs['image_id'] = image_uuid

    kwargs['availability_zone'] = volume.get('availability_zone', None)
    kwargs['scheduler_hints'] = volume.get('scheduler_hints', None)
    multiattach = volume.get('multiattach', False)
    kwargs['multiattach'] = multiattach

    new_volume = self.volume_api.create(context,
                                        size,
                                        volume.get('display_name'),
                                        volume.get('display_description'),
                                        **kwargs)

    retval = self._view_builder.detail(req, new_volume)

    return retval
def create_resource(ext_mgr):
    """Build the WSGI resource wrapping the V3 volume controller."""
    controller = VolumeController(ext_mgr)
    return wsgi.Resource(controller)

View File

@ -190,6 +190,9 @@ global_opts = [
cfg.StrOpt('consistencygroup_api_class',
default='cinder.consistencygroup.api.API',
help='The full class name of the consistencygroup API class'),
cfg.StrOpt('group_api_class',
default='cinder.group.api.API',
help='The full class name of the group API class'),
cfg.StrOpt('os_privileged_user_name',
help='OpenStack privileged account username. Used for requests '
'to other services (such as Nova) that require an account '

View File

@ -269,6 +269,12 @@ def volume_get_all_by_group(context, group_id, filters=None):
return IMPL.volume_get_all_by_group(context, group_id, filters=filters)
def volume_get_all_by_generic_group(context, group_id, filters=None):
    """Get all volumes belonging to a generic volume group.

    :param context: request context
    :param group_id: UUID of the generic volume group
    :param filters: optional dict of volume filters to apply
    :returns: list of matching volumes from the backend IMPL
    """
    return IMPL.volume_get_all_by_generic_group(context, group_id,
                                                filters=filters)
def volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None, filters=None,
offset=None):
@ -299,6 +305,14 @@ def volume_update(context, volume_id, values):
return IMPL.volume_update(context, volume_id, values)
def volumes_update(context, values_list):
    """Set the given properties on a list of volumes and update them.

    :param context: request context
    :param values_list: list of property dicts, one per volume; each entry
                        identifies its volume (handled by the backend IMPL)

    Raises NotFound if a volume does not exist.
    """
    return IMPL.volumes_update(context, values_list)
def volume_include_in_cluster(context, cluster, partial_rename=True,
**filters):
"""Include all volumes matching the filters into a cluster.
@ -716,6 +730,11 @@ def group_type_access_remove(context, type_id, project_id):
return IMPL.group_type_access_remove(context, type_id, project_id)
def volume_type_get_all_by_group(context, group_id):
    """Get all volume types associated with a generic volume group.

    :param context: request context
    :param group_id: UUID of the generic volume group
    :returns: list of volume types from the backend IMPL
    """
    return IMPL.volume_type_get_all_by_group(context, group_id)
####################
@ -1281,6 +1300,53 @@ def consistencygroup_include_in_cluster(context, cluster, partial_rename=True,
###################
def group_get(context, group_id):
    """Get a group or raise if it does not exist.

    :param context: request context
    :param group_id: UUID of the group to look up
    :returns: the matching group from the backend IMPL
    """
    return IMPL.group_get(context, group_id)
def group_get_all(context, filters=None, marker=None, limit=None,
                  offset=None, sort_keys=None, sort_dirs=None):
    """Get all groups.

    :param context: request context
    :param filters: optional dict of exact-match filters
    :param marker: last item of the previous page, for pagination
    :param limit: maximum number of items to return
    :param offset: number of items to skip before returning results
    :param sort_keys: attributes to sort by, paired with sort_dirs
    :param sort_dirs: sort directions, paired with sort_keys
    :returns: list of matching groups from the backend IMPL
    """
    return IMPL.group_get_all(context, filters=filters,
                              marker=marker, limit=limit,
                              offset=offset, sort_keys=sort_keys,
                              sort_dirs=sort_dirs)
def group_create(context, values):
    """Create a group from the values dictionary.

    :param context: request context
    :param values: dict of group properties to store
    :returns: the created group from the backend IMPL
    """
    return IMPL.group_create(context, values)
def group_get_all_by_project(context, project_id, filters=None,
                             marker=None, limit=None, offset=None,
                             sort_keys=None, sort_dirs=None):
    """Get all groups belonging to a project.

    :param context: request context
    :param project_id: project owning the groups
    :param filters: optional dict of exact-match filters
    :param marker: last item of the previous page, for pagination
    :param limit: maximum number of items to return
    :param offset: number of items to skip before returning results
    :param sort_keys: attributes to sort by, paired with sort_dirs
    :param sort_dirs: sort directions, paired with sort_keys
    :returns: list of matching groups from the backend IMPL
    """
    return IMPL.group_get_all_by_project(context, project_id,
                                         filters=filters,
                                         marker=marker, limit=limit,
                                         offset=offset,
                                         sort_keys=sort_keys,
                                         sort_dirs=sort_dirs)
def group_update(context, group_id, values):
    """Set the given properties on a group and update it.

    :param context: request context
    :param group_id: UUID of the group to update
    :param values: dict of properties to apply

    Raises NotFound if group does not exist.
    """
    return IMPL.group_update(context, group_id, values)
def group_destroy(context, group_id):
    """Destroy the group or raise if it does not exist.

    :param context: request context
    :param group_id: UUID of the group to destroy
    """
    return IMPL.group_destroy(context, group_id)
###################
def cgsnapshot_get(context, cgsnapshot_id):
    """Get a cgsnapshot or raise if it does not exist.

    :param context: request context
    :param cgsnapshot_id: UUID of the consistency group snapshot
    :returns: the matching cgsnapshot from the backend IMPL
    """
    return IMPL.cgsnapshot_get(context, cgsnapshot_id)

View File

@ -342,6 +342,15 @@ def _sync_consistencygroups(context, project_id, session,
return {key: groups}
def _sync_groups(context, project_id, session,
                 volume_type_id=None,
                 volume_type_name=None):
    """Quota-sync helper: report the in-use group count for a project.

    The volume_type arguments are accepted for signature compatibility
    with the other _sync_* functions but are unused for groups.
    """
    _junk, group_count = _group_data_get_for_project(
        context, project_id, session=session)
    return {'groups': group_count}
def _sync_backup_gigabytes(context, project_id, session, volume_type_id=None,
volume_type_name=None):
key = 'backup_gigabytes'
@ -356,7 +365,8 @@ QUOTA_SYNC_FUNCTIONS = {
'_sync_gigabytes': _sync_gigabytes,
'_sync_consistencygroups': _sync_consistencygroups,
'_sync_backups': _sync_backups,
'_sync_backup_gigabytes': _sync_backup_gigabytes
'_sync_backup_gigabytes': _sync_backup_gigabytes,
'_sync_groups': _sync_groups,
}
@ -1662,14 +1672,16 @@ def _volume_get_query(context, session=None, project_only=False,
options(joinedload('volume_admin_metadata')).\
options(joinedload('volume_type')).\
options(joinedload('volume_attachment')).\
options(joinedload('consistencygroup'))
options(joinedload('consistencygroup')).\
options(joinedload('group'))
else:
return model_query(context, models.Volume, session=session,
project_only=project_only).\
options(joinedload('volume_metadata')).\
options(joinedload('volume_type')).\
options(joinedload('volume_attachment')).\
options(joinedload('consistencygroup'))
options(joinedload('consistencygroup')).\
options(joinedload('group'))
@require_context
@ -1832,7 +1844,7 @@ def volume_get_all_by_group(context, group_id, filters=None):
"""Retrieves all volumes associated with the group_id.
:param context: context to query under
:param group_id: group ID for all volumes being retrieved
:param group_id: consistency group ID for all volumes being retrieved
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
@ -1848,6 +1860,27 @@ def volume_get_all_by_group(context, group_id, filters=None):
return query.all()
@require_context
def volume_get_all_by_generic_group(context, group_id, filters=None):
    """Retrieves all volumes associated with the group_id.

    :param context: context to query under
    :param group_id: group ID for all volumes being retrieved
    :param filters: dictionary of filters; values that are in lists, tuples,
                    or sets cause an 'IN' operation, while exact matching
                    is used for other values, see _process_volume_filters
                    function for more information
    :returns: list of matching volumes
    """
    query = _volume_get_query(context).filter_by(group_id=group_id)
    if filters:
        query = _process_volume_filters(query, filters)
        # No volumes would match, return empty list
        if query is None:
            return []
    return query.all()
@require_context
def volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None, filters=None,
@ -2140,6 +2173,38 @@ def volume_update(context, volume_id, values):
return volume_ref
@handle_db_data_error
@require_context
def volumes_update(context, values_list):
    """Set the given properties on a list of volumes and update them.

    Each dict in values_list must carry an 'id' key naming the volume to
    update; the remaining keys are applied to the volume row. 'metadata'
    and 'admin_metadata' entries are routed to the metadata helpers rather
    than written to the row directly. All updates share one transaction.

    :param context: request context
    :param values_list: list of property dicts, one per volume
    :returns: list of the updated volume DB objects
    :raises: NotFound if a volume does not exist (via _volume_get)
    """
    session = get_session()
    with session.begin():
        volume_refs = []
        for values in values_list:
            volume_id = values['id']
            values.pop('id')
            metadata = values.get('metadata')
            if metadata is not None:
                # delete=True: replace the volume's user metadata wholesale.
                _volume_user_metadata_update(context,
                                             volume_id,
                                             values.pop('metadata'),
                                             delete=True,
                                             session=session)
            admin_metadata = values.get('admin_metadata')
            if is_admin_context(context) and admin_metadata is not None:
                # Admin metadata is only updated for admin contexts.
                _volume_admin_metadata_update(context,
                                              volume_id,
                                              values.pop('admin_metadata'),
                                              delete=True,
                                              session=session)
            volume_ref = _volume_get(context, volume_id, session=session)
            volume_ref.update(values)
            volume_refs.append(volume_ref)

    return volume_refs
@require_context
def volume_attachment_update(context, attachment_id, values):
session = get_session()
@ -3554,7 +3619,12 @@ def volume_type_destroy(context, id):
_volume_type_get(context, id, session)
results = model_query(context, models.Volume, session=session). \
filter_by(volume_type_id=id).all()
if results:
group_count = model_query(context,
models.GroupVolumeTypeMapping,
read_deleted="no",
session=session).\
filter_by(volume_type_id=id).count()
if results or group_count:
LOG.error(_LE('VolumeType %s deletion failed, '
'VolumeType in use.'), id)
raise exception.VolumeTypeInUse(volume_type_id=id)
@ -3618,7 +3688,8 @@ def volume_get_active_by_window(context,
query = (query.options(joinedload('volume_metadata')).
options(joinedload('volume_type')).
options(joinedload('volume_attachment')).
options(joinedload('consistencygroup')))
options(joinedload('consistencygroup')).
options(joinedload('group')))
if is_admin_context(context):
query = query.options(joinedload('volume_admin_metadata'))
@ -3650,6 +3721,29 @@ def group_type_access_get_all(context, type_id):
filter_by(group_type_id=group_type_id).all()
def _group_volume_type_mapping_query(context, session=None):
    """Base query over non-deleted group<->volume-type mapping rows."""
    return model_query(context, models.GroupVolumeTypeMapping,
                       read_deleted="no", session=session)
@require_admin_context
def volume_type_get_all_by_group(context, group_id):
    """Return all volume types mapped to a generic volume group.

    :param context: admin request context
    :param group_id: UUID of the generic volume group
    :returns: list of VolumeTypes rows with extra_specs eagerly loaded
    """
    # Generic volume group
    mappings = (_group_volume_type_mapping_query(context).
                filter_by(group_id=group_id).all())
    session = get_session()
    with session.begin():
        volume_type_ids = [mapping.volume_type_id for mapping in mappings]
        query = (model_query(context,
                             models.VolumeTypes,
                             session=session,
                             read_deleted='no').
                 filter(models.VolumeTypes.id.in_(volume_type_ids)).
                 options(joinedload('extra_specs')).
                 all())
        return query
@require_admin_context
def volume_type_access_add(context, type_id, project_id):
"""Add given tenant to the volume type access list."""
@ -5067,6 +5161,188 @@ def consistencygroup_include_in_cluster(context, cluster,
###############################
@require_admin_context
def _group_data_get_for_project(context, project_id,
                                session=None):
    """Return a (0, group_count) usage tuple for a project.

    The first element is always 0; only the group count is tracked
    (mirrors the shape of the other *_data_get_for_project helpers).
    """
    count_query = model_query(context,
                              func.count(models.Group.id),
                              read_deleted="no",
                              session=session).\
        filter_by(project_id=project_id)

    row = count_query.first()
    group_count = row[0] or 0
    return (0, group_count)
@require_context
def _group_get(context, group_id, session=None):
    """Fetch one group row, raising GroupNotFound when absent."""
    query = model_query(context, models.Group, session=session,
                        project_only=True).filter_by(id=group_id)
    group = query.first()

    if group is None:
        raise exception.GroupNotFound(group_id=group_id)

    return group
@require_context
def group_get(context, group_id):
    """Return the group with the given id.

    :raises: GroupNotFound if the group does not exist (via _group_get)
    """
    return _group_get(context, group_id)
def _groups_get_query(context, session=None, project_only=False):
    """Base query over Group rows, optionally scoped to the project."""
    return model_query(context, models.Group, session=session,
                       project_only=project_only)
def _process_groups_filters(query, filters):
    """Apply exact-match filters to a groups query.

    Returns None when any filter key is not a column on the Group model,
    signalling the caller that nothing can match.
    """
    if not filters:
        return query

    # Ensure that filters' keys exist on the model
    if not is_valid_model_filters(models.Group, filters):
        return None

    return query.filter_by(**filters)
def _group_get_all(context, filters=None, marker=None, limit=None,
                   offset=None, sort_keys=None, sort_dirs=None):
    """Shared implementation behind the group_get_all* entry points."""
    # Bail out early when a filter references a non-existent column.
    if filters and not is_valid_model_filters(models.Group, filters):
        return []

    session = get_session()
    with session.begin():
        # Generate the paginate query
        paginate_query = _generate_paginate_query(
            context, session, marker, limit, sort_keys, sort_dirs,
            filters, offset, models.Group)
        if paginate_query is None:
            return []
        return paginate_query.all()
@require_admin_context
def group_get_all(context, filters=None, marker=None, limit=None,
                  offset=None, sort_keys=None, sort_dirs=None):
    """Retrieves all groups.

    If no sort parameters are specified then the returned groups are sorted
    first by the 'created_at' key and then by the 'id' key in descending
    order.

    :param context: context to query under
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param offset: number of items to skip before returning results
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: Filters for the query in the form of key/value.
    :returns: list of matching groups
    """
    return _group_get_all(context, filters, marker, limit, offset,
                          sort_keys, sort_dirs)
@require_context
def group_get_all_by_project(context, project_id, filters=None,
                             marker=None, limit=None, offset=None,
                             sort_keys=None, sort_dirs=None):
    """Retrieves all groups in a project.

    If no sort parameters are specified then the returned groups are sorted
    first by the 'created_at' key and then by the 'id' key in descending
    order.

    :param context: context to query under
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: Filters for the query in the form of key/value.
    :returns: list of matching groups
    """
    authorize_project_context(context, project_id)

    # Work on a copy so the caller's filters dict is left untouched.
    project_filters = dict(filters) if filters else {}
    project_filters['project_id'] = project_id

    return _group_get_all(context, project_filters, marker, limit, offset,
                          sort_keys, sort_dirs)
@handle_db_data_error
@require_context
def group_create(context, values):
    """Create a new group row plus its volume-type mapping rows.

    :param context: request context
    :param values: dict of group columns; a 'volume_type_ids' entry, if
                   present, is converted into GroupVolumeTypeMapping rows
    :returns: the freshly created group, re-read within the same session
    """
    group = models.Group()
    # Generate an id up front so the mapping rows can reference it.
    if not values.get('id'):
        values['id'] = six.text_type(uuid.uuid4())

    mappings = []
    for item in values.get('volume_type_ids') or []:
        mapping = models.GroupVolumeTypeMapping()
        mapping['volume_type_id'] = item
        mapping['group_id'] = values['id']
        mappings.append(mapping)
    # 'volume_types' is the relationship attribute on the Group model.
    values['volume_types'] = mappings

    session = get_session()
    with session.begin():
        group.update(values)
        session.add(group)

        return _group_get(context, values['id'], session=session)
@handle_db_data_error
@require_context
def group_update(context, group_id, values):
    """Apply the given column values to an existing group.

    :param context: request context
    :param group_id: UUID of the group to update
    :param values: dict of column updates to apply
    :returns: the updated group DB object
    :raises: GroupNotFound if the group does not exist
    """
    session = get_session()
    with session.begin():
        # Run the lookup in the same session/transaction that the save
        # below uses (the query previously opened an implicit second
        # session by omitting session=).
        result = (model_query(context, models.Group,
                              session=session,
                              project_only=True).
                  filter_by(id=group_id).
                  first())

        if not result:
            # Consistent with _group_get: let GroupNotFound build its own
            # message from the group_id kwarg instead of passing a
            # pre-formatted string.
            raise exception.GroupNotFound(group_id=group_id)

        result.update(values)
        result.save(session=session)
    return result
@require_admin_context
def group_destroy(context, group_id):
    """Soft-delete a group and its volume-type mapping rows.

    Marks the group row as deleted with status DELETED and flags the
    associated GroupVolumeTypeMapping rows as deleted; no rows are
    physically removed.

    :param context: admin request context
    :param group_id: UUID of the group to destroy
    """
    session = get_session()
    with session.begin():
        # Soft-delete the group row itself.
        (model_query(context, models.Group, session=session).
         filter_by(id=group_id).
         update({'status': fields.GroupStatus.DELETED,
                 'deleted': True,
                 'deleted_at': timeutils.utcnow(),
                 'updated_at': literal_column('updated_at')}))

        # Soft-delete the group -> volume-type mapping rows as well.
        (session.query(models.GroupVolumeTypeMapping).
         filter_by(group_id=group_id).
         update({'deleted': True,
                 'deleted_at': timeutils.utcnow(),
                 'updated_at': literal_column('updated_at')}))
###############################
@require_context
def _cgsnapshot_get(context, cgsnapshot_id, session=None):
result = model_query(context, models.Cgsnapshot, session=session,
@ -5436,6 +5712,9 @@ PAGINATION_HELPERS = {
_message_get),
models.GroupTypes: (_group_type_get_query, _process_group_types_filters,
_group_type_get_db_object),
models.Group: (_groups_get_query,
_process_groups_filters,
_group_get),
}

View File

@ -0,0 +1,97 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import ForeignKey, MetaData, String, Table
# Default number of quota groups. We should not read from config file.
DEFAULT_QUOTA_GROUPS = 10
CLASS_NAME = 'default'
CREATED_AT = datetime.datetime.now() # noqa
def upgrade(migrate_engine):
    """Add the generic volume group schema.

    Creates the 'groups' and 'group_volume_type_mapping' tables, adds a
    'group_id' foreign-key column to 'volumes', and seeds the default
    'groups' quota class (unless one already exists).
    """
    meta = MetaData()
    meta.bind = migrate_engine

    # New table
    groups = Table(
        'groups',
        meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean),
        Column('id', String(36), primary_key=True, nullable=False),
        Column('user_id', String(length=255)),
        Column('project_id', String(length=255)),
        Column('cluster_name', String(255)),
        Column('host', String(length=255)),
        Column('availability_zone', String(length=255)),
        Column('name', String(length=255)),
        Column('description', String(length=255)),
        Column('group_type_id', String(length=36)),
        Column('status', String(length=255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    groups.create()

    # Add column to volumes table
    volumes = Table('volumes', meta, autoload=True)
    group_id = Column('group_id', String(36),
                      ForeignKey('groups.id'))
    volumes.create_column(group_id)
    volumes.update().values(group_id=None).execute()

    # New group_volume_type_mapping table.  volume_types must be loaded
    # into the metadata so the ForeignKey below can resolve.
    Table('volume_types', meta, autoload=True)
    grp_vt_mapping = Table(
        'group_volume_type_mapping', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('volume_type_id', String(36), ForeignKey('volume_types.id'),
               nullable=False),
        Column('group_id', String(36),
               ForeignKey('groups.id'), nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    grp_vt_mapping.create()

    # Add group quota data into DB.
    quota_classes = Table('quota_classes', meta, autoload=True)

    rows = (quota_classes.count().
            where(quota_classes.c.resource == 'groups').
            execute().scalar())

    # Do not add entries if there are already 'groups' entries.
    if rows:
        return

    # Set groups
    qci = quota_classes.insert()
    qci.execute({'created_at': CREATED_AT,
                 'class_name': CLASS_NAME,
                 'resource': 'groups',
                 'hard_limit': DEFAULT_QUOTA_GROUPS,
                 'deleted': False, })

View File

@ -171,6 +171,23 @@ class ConsistencyGroup(BASE, CinderBase):
source_cgid = Column(String(36))
class Group(BASE, CinderBase):
    """Represents a generic volume group."""
    __tablename__ = 'groups'

    id = Column(String(36), primary_key=True)

    user_id = Column(String(255), nullable=False)
    project_id = Column(String(255), nullable=False)

    cluster_name = Column(String(255))
    # Owning volume service; 'host@backend#pool' format is used elsewhere
    # when comparing against volume hosts.
    host = Column(String(255))
    availability_zone = Column(String(255))
    name = Column(String(255))
    description = Column(String(255))
    # Lifecycle state; values come from objects.fields.GroupStatus.
    status = Column(String(255))
    group_type_id = Column(String(36))
class Cgsnapshot(BASE, CinderBase):
"""Represents a cgsnapshot."""
__tablename__ = 'cgsnapshots'
@ -240,6 +257,7 @@ class Volume(BASE, CinderBase):
encryption_key_id = Column(String(36))
consistencygroup_id = Column(String(36))
group_id = Column(String(36))
bootable = Column(Boolean, default=False)
multiattach = Column(Boolean, default=False)
@ -256,6 +274,12 @@ class Volume(BASE, CinderBase):
foreign_keys=consistencygroup_id,
primaryjoin='Volume.consistencygroup_id == ConsistencyGroup.id')
group = relationship(
Group,
backref="volumes",
foreign_keys=group_id,
primaryjoin='Volume.group_id == Group.id')
class VolumeMetadata(BASE, CinderBase):
"""Represents a metadata key/value pair for a volume."""
@ -330,13 +354,33 @@ class GroupTypes(BASE, CinderBase):
name = Column(String(255))
description = Column(String(255))
is_public = Column(Boolean, default=True)
    # Link group-type rows to their (undeleted) groups; gives each Group a
    # one-to-one 'group_type' backref.
groups = relationship(Group,
backref=backref('group_type', uselist=False),
foreign_keys=id,
primaryjoin='and_('
'Group.group_type_id == GroupTypes.id, '
'GroupTypes.deleted == False)')
class GroupVolumeTypeMapping(BASE, CinderBase):
    """Represent mapping between groups and volume_types."""
    __tablename__ = "group_volume_type_mapping"
    id = Column(Integer, primary_key=True, nullable=False)
    volume_type_id = Column(String(36),
                            ForeignKey('volume_types.id'),
                            nullable=False)
    group_id = Column(String(36),
                      ForeignKey('groups.id'),
                      nullable=False)

    # Gives Group a 'volume_types' backref listing its (undeleted)
    # mapping rows.
    group = relationship(
        Group,
        backref="volume_types",
        foreign_keys=group_id,
        primaryjoin='and_('
                    'GroupVolumeTypeMapping.group_id == Group.id,'
                    'GroupVolumeTypeMapping.deleted == False)'
    )
class VolumeTypeProjects(BASE, CinderBase):

View File

@ -1044,6 +1044,15 @@ class InvalidConsistencyGroup(Invalid):
message = _("Invalid ConsistencyGroup: %(reason)s")
# Group
class GroupNotFound(NotFound):
    """Raised when a generic volume group with the given id does not exist."""
    message = _("Group %(group_id)s could not be found.")
class InvalidGroup(Invalid):
    """Raised when a group is in a state that does not allow the operation."""
    message = _("Invalid Group: %(reason)s")
# CgSnapshot
class CgSnapshotNotFound(NotFound):
    """Raised when a consistency-group snapshot cannot be found."""
    message = _("CgSnapshot %(cgsnapshot_id)s could not be found.")

27
cinder/group/__init__.py Normal file
View File

@ -0,0 +1,27 @@
# Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from cinder.transfer import <foo>' elsewhere.
from oslo_utils import importutils
from cinder.common import config
CONF = config.CONF

# Load the configured group API implementation class so callers can simply
# instantiate ``cinder.group.API()``.
API = importutils.import_class(
    CONF.group_api_class)

543
cinder/group/api.py Normal file
View File

@ -0,0 +1,543 @@
# Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to groups.
"""
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from cinder.db import base
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder import objects
from cinder.objects import base as objects_base
from cinder.objects import fields as c_fields
import cinder.policy
from cinder import quota
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder.volume import api as volume_api
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
GROUP_QUOTAS = quota.GROUP_QUOTAS
VALID_REMOVE_VOL_FROM_GROUP_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_GROUP_STATUS = (
'available',
'in-use')
def wrap_check_policy(func):
    """Decorator that enforces the matching group policy before execution.

    The wrapped function's first three positional arguments must be
    (self, context, group); the policy action is derived from the
    function's name.
    """
    @functools.wraps(func)
    def _enforce_then_call(self, context, target, *args, **kwargs):
        check_policy(context, func.__name__, target)
        return func(self, context, target, *args, **kwargs)

    return _enforce_then_call
def check_policy(context, action, target_obj=None):
    """Enforce the ``group:<action>`` policy rule for this request."""
    target = {'project_id': context.project_id,
              'user_id': context.user_id}
    if isinstance(target_obj, objects_base.CinderObject):
        # Versioned objects must be flattened to a plain dict before
        # they can be merged into the policy target.
        primitive = target_obj.obj_to_primitive()['versioned_object.data']
        target.update(primitive or {})
    else:
        target.update(target_obj or {})
    cinder.policy.enforce(context, 'group:%s' % action, target)
class API(base.Base):
"""API for interacting with the volume manager for groups."""
def __init__(self, db_driver=None):
    # RPC clients used to fan work out to the scheduler and volume
    # services; the volume API is reused for availability-zone listing.
    self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
    self.volume_rpcapi = volume_rpcapi.VolumeAPI()
    self.volume_api = volume_api.API()
    super(API, self).__init__(db_driver)
def _extract_availability_zone(self, availability_zone):
    """Validate and/or default the requested availability zone.

    When no AZ is given, falls back to the configured default (or
    storage) AZ.  When an unknown AZ is given, either falls back with a
    warning (if allow_availability_zone_fallback is enabled) or raises.

    :raises: exception.InvalidInput for an unknown AZ without fallback
    """
    raw_zones = self.volume_api.list_availability_zones(enable_cache=True)
    availability_zones = set([az['name'] for az in raw_zones])
    if CONF.storage_availability_zone:
        availability_zones.add(CONF.storage_availability_zone)

    if availability_zone is None:
        if CONF.default_availability_zone:
            availability_zone = CONF.default_availability_zone
        else:
            # For backwards compatibility use the storage_availability_zone
            availability_zone = CONF.storage_availability_zone

    if availability_zone not in availability_zones:
        if CONF.allow_availability_zone_fallback:
            original_az = availability_zone
            availability_zone = (
                CONF.default_availability_zone or
                CONF.storage_availability_zone)
            LOG.warning(_LW("Availability zone '%(s_az)s' "
                            "not found, falling back to "
                            "'%(s_fallback_az)s'."),
                        {'s_az': original_az,
                         's_fallback_az': availability_zone})
        else:
            msg = _("Availability zone '%(s_az)s' is invalid.")
            msg = msg % {'s_az': availability_zone}
            raise exception.InvalidInput(reason=msg)

    return availability_zone
def create(self, context, name, description, group_type,
           volume_types, availability_zone=None):
    """Create a new generic volume group.

    Persists the group in status CREATING, reserves quota, and casts the
    creation request to the scheduler.

    :param context: request context
    :param name: display name for the group
    :param description: display description for the group
    :param group_type: group type id (or name)
    :param volume_types: list of volume type ids allowed in the group
    :param availability_zone: optional AZ; validated/defaulted via
                              _extract_availability_zone
    :returns: the created Group object
    """
    check_policy(context, 'create')

    # NOTE: Admin context is required to get extra_specs of volume_types.
    # (The previous dead initialization of req_volume_types was removed;
    # it was immediately overwritten by this call.)
    req_volume_types = (self.db.volume_types_get_by_name_or_id(
        context.elevated(), volume_types))

    req_group_type = self.db.group_type_get(context, group_type)

    availability_zone = self._extract_availability_zone(availability_zone)
    kwargs = {'user_id': context.user_id,
              'project_id': context.project_id,
              'availability_zone': availability_zone,
              'status': c_fields.GroupStatus.CREATING,
              'name': name,
              'description': description,
              'volume_type_ids': volume_types,
              'group_type_id': group_type}
    group = None
    try:
        group = objects.Group(context=context, **kwargs)
        group.create()
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error occurred when creating group"
                          " %s."), name)

    # Build one request spec per requested volume type; the scheduler
    # uses these to place the group.
    request_spec_list = []
    filter_properties_list = []
    for req_volume_type in req_volume_types:
        request_spec = {'volume_type': req_volume_type.copy(),
                        'group_id': group.id}
        filter_properties = {}
        request_spec_list.append(request_spec)
        filter_properties_list.append(filter_properties)

    group_spec = {'group_type': req_group_type.copy(),
                  'group_id': group.id}
    group_filter_properties = {}

    # Update quota for groups
    self.update_quota(context, group, 1)

    self._cast_create_group(context, group,
                            group_spec,
                            request_spec_list,
                            group_filter_properties,
                            filter_properties_list)

    return group
def _cast_create_group(self, context, group,
                       group_spec,
                       request_spec_list,
                       group_filter_properties,
                       filter_properties_list):
    """Fill in scheduler specs and cast group creation over RPC.

    Each per-volume-type request spec gets volume properties and qos
    specs; the group spec gets analogous group-level properties.  On any
    failure while building the specs the group is destroyed before the
    exception is re-raised.
    """
    try:
        for request_spec in request_spec_list:
            volume_type = request_spec.get('volume_type')
            volume_type_id = None
            if volume_type:
                volume_type_id = volume_type.get('id')

            specs = {}
            if volume_type_id:
                qos_specs = volume_types.get_volume_type_qos_specs(
                    volume_type_id)
                specs = qos_specs['qos_specs']
            if not specs:
                # to make sure we don't pass empty dict
                specs = None

            volume_properties = {
                'size': 0,  # Need to populate size for the scheduler
                'user_id': context.user_id,
                'project_id': context.project_id,
                'status': 'creating',
                'attach_status': 'detached',
                'encryption_key_id': request_spec.get('encryption_key_id'),
                'display_description': request_spec.get('description'),
                'display_name': request_spec.get('name'),
                'volume_type_id': volume_type_id,
                'group_type_id': group.group_type_id,
            }

            request_spec['volume_properties'] = volume_properties
            request_spec['qos_specs'] = specs

        group_properties = {
            'size': 0,  # Need to populate size for the scheduler
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': 'creating',
            'display_description': group_spec.get('description'),
            'display_name': group_spec.get('name'),
            'group_type_id': group.group_type_id,
        }

        group_spec['volume_properties'] = group_properties
        group_spec['qos_specs'] = None

    except Exception:
        with excutils.save_and_reraise_exception():
            try:
                group.destroy()
            finally:
                LOG.error(_LE("Error occurred when building "
                              "request spec list for group "
                              "%s."), group.id)

    # Cast to the scheduler and let it handle whatever is needed
    # to select the target host for this group.
    self.scheduler_rpcapi.create_group(
        context,
        CONF.volume_topic,
        group,
        group_spec=group_spec,
        request_spec_list=request_spec_list,
        group_filter_properties=group_filter_properties,
        filter_properties_list=filter_properties_list)
def update_quota(self, context, group, num, project_id=None):
    """Adjust the 'groups' quota usage by ``num`` (may be negative).

    On any reservation/commit failure the group is destroyed and the
    original exception is re-raised.
    """
    try:
        reservations = GROUP_QUOTAS.reserve(
            context, project_id=project_id, groups=num)
        if reservations:
            GROUP_QUOTAS.commit(context, reservations)
    except Exception:
        with excutils.save_and_reraise_exception():
            try:
                group.destroy()
            finally:
                LOG.error(_LE("Failed to update quota for "
                              "group %s."), group.id)
@wrap_check_policy
def delete(self, context, group, delete_volumes=False):
    """Delete a group, optionally together with its volumes.

    A group that was never scheduled to a host is removed from the
    database directly (releasing its quota).  Otherwise the group is
    marked 'deleting' and the request is cast to the volume service.

    :raises: exception.InvalidGroup when the group state does not allow
             deletion, when it still contains volumes without
             ``delete_volumes``, or when a contained volume is attached
             or has snapshots
    """
    if not group.host:
        self.update_quota(context, group, -1, group.project_id)

        LOG.debug("No host for group %s. Deleting from "
                  "the database.", group.id)
        group.destroy()

        return

    # Without the delete-volumes flag only available/error groups may be
    # deleted.
    if not delete_volumes and group.status not in (
            [c_fields.GroupStatus.AVAILABLE,
             c_fields.GroupStatus.ERROR]):
        msg = _("Group status must be available or error, "
                "but current status is: %s") % group.status
        raise exception.InvalidGroup(reason=msg)

    volumes = self.db.volume_get_all_by_generic_group(context.elevated(),
                                                      group.id)
    if volumes and not delete_volumes:
        msg = (_("Group %s still contains volumes. "
                 "The delete-volumes flag is required to delete it.")
               % group.id)
        LOG.error(msg)
        raise exception.InvalidGroup(reason=msg)

    volumes_model_update = []
    for volume in volumes:
        if volume['attach_status'] == "attached":
            msg = _("Volume in group %s is attached. "
                    "Need to detach first.") % group.id
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)

        snapshots = objects.SnapshotList.get_all_for_volume(context,
                                                            volume['id'])
        if snapshots:
            msg = _("Volume in group still has "
                    "dependent snapshots.")
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)

        volumes_model_update.append({'id': volume['id'],
                                     'status': 'deleting'})

    # Mark the contained volumes deleting in one DB call before casting.
    self.db.volumes_update(context, volumes_model_update)

    group.status = c_fields.GroupStatus.DELETING
    group.terminated_at = timeutils.utcnow()
    group.save()

    self.volume_rpcapi.delete_group(context, group)
def update(self, context, group, name, description,
           add_volumes, remove_volumes):
    """Update group name/description and group membership.

    ``add_volumes`` and ``remove_volumes`` are comma-separated uuid
    strings.  Name/description changes are written to the database here;
    membership changes are validated and then cast to the volume
    service.

    :raises: exception.InvalidGroup when the group is not available or
             nothing valid was supplied to update
    :raises: exception.InvalidVolume when a uuid appears in both lists
             or fails membership validation
    """
    if group.status != c_fields.GroupStatus.AVAILABLE:
        msg = _("Group status must be available, "
                "but current status is: %s.") % group.status
        raise exception.InvalidGroup(reason=msg)

    add_volumes_list = []
    remove_volumes_list = []
    if add_volumes:
        add_volumes = add_volumes.strip(',')
        add_volumes_list = add_volumes.split(',')
    if remove_volumes:
        remove_volumes = remove_volumes.strip(',')
        remove_volumes_list = remove_volumes.split(',')

    # A uuid may not appear in both lists.
    invalid_uuids = []
    for uuid in add_volumes_list:
        if uuid in remove_volumes_list:
            invalid_uuids.append(uuid)
    if invalid_uuids:
        msg = _("UUIDs %s are in both add and remove volume "
                "list.") % invalid_uuids
        raise exception.InvalidVolume(reason=msg)

    volumes = self.db.volume_get_all_by_generic_group(context, group.id)

    # Validate name.
    if name == group.name:
        name = None

    # Validate description.
    if description == group.description:
        description = None

    # Validate volumes in add_volumes and remove_volumes.
    add_volumes_new = ""
    remove_volumes_new = ""
    if add_volumes_list:
        add_volumes_new = self._validate_add_volumes(
            context, volumes, add_volumes_list, group)
    if remove_volumes_list:
        remove_volumes_new = self._validate_remove_volumes(
            volumes, remove_volumes_list, group)

    if (name is None and description is None and not add_volumes_new and
            not remove_volumes_new):
        msg = (_("Cannot update group %(group_id)s "
                 "because no valid name, description, add_volumes, "
                 "or remove_volumes were provided.") %
               {'group_id': group.id})
        raise exception.InvalidGroup(reason=msg)

    fields = {'updated_at': timeutils.utcnow()}

    # Update name and description in db now. No need to
    # to send them over through an RPC call.
    if name is not None:
        fields['name'] = name
    if description is not None:
        fields['description'] = description
    if not add_volumes_new and not remove_volumes_new:
        # Only update name or description. Set status to available.
        # NOTE: use the GroupStatus constants (same values as the raw
        # strings previously used here) for consistency with the rest of
        # this module.
        fields['status'] = c_fields.GroupStatus.AVAILABLE
    else:
        fields['status'] = c_fields.GroupStatus.UPDATING

    group.update(fields)
    group.save()

    # Do an RPC call only if the update request includes
    # adding/removing volumes. add_volumes_new and remove_volumes_new
    # are strings of volume UUIDs separated by commas with no spaces
    # in between.
    if add_volumes_new or remove_volumes_new:
        self.volume_rpcapi.update_group(
            context, group,
            add_volumes=add_volumes_new,
            remove_volumes=remove_volumes_new)
def _validate_remove_volumes(self, volumes, remove_volumes_list, group):
    """Validate the volumes requested for removal from the group.

    :param volumes: volumes currently in the group
    :param remove_volumes_list: list of volume uuids to remove
    :param group: the group being updated
    :returns: comma-separated string of validated volume uuids
    :raises: exception.InvalidVolume if a volume is in an invalid state
             or is not a member of the group
    """
    removed_ids = []
    for volume in volumes:
        if volume['id'] in remove_volumes_list:
            if volume['status'] not in VALID_REMOVE_VOL_FROM_GROUP_STATUS:
                msg = (_("Cannot remove volume %(volume_id)s from "
                         "group %(group_id)s because volume "
                         "is in an invalid state: %(status)s. Valid "
                         "states are: %(valid)s.") %
                       {'volume_id': volume['id'],
                        'group_id': group.id,
                        'status': volume['status'],
                        'valid': VALID_REMOVE_VOL_FROM_GROUP_STATUS})
                raise exception.InvalidVolume(reason=msg)
            # Volume currently in group. It will be removed from group.
            removed_ids.append(volume['id'])

    # NOTE: membership is checked against the list of validated ids
    # rather than via substring search of the joined string, so an id
    # that merely prefixes another cannot be treated as present.
    for rem_vol in remove_volumes_list:
        if rem_vol not in removed_ids:
            msg = (_("Cannot remove volume %(volume_id)s from "
                     "group %(group_id)s because it "
                     "is not in the group.") %
                   {'volume_id': rem_vol,
                    'group_id': group.id})
            raise exception.InvalidVolume(reason=msg)

    return ",".join(removed_ids)
def _validate_add_volumes(self, context, volumes, add_volumes_list, group):
    """Validate the volumes requested for addition to the group.

    Uuids already in the group are silently dropped from
    ``add_volumes_list``.  Every remaining uuid must refer to an
    existing volume that is not in another group, has a volume type
    supported by this group, is in a valid state, and lives on the same
    host as the group.

    :returns: comma-separated string of validated volume uuids
    :raises: exception.InvalidVolume when any check fails
    """
    added_ids = []
    for volume in volumes:
        if volume['id'] in add_volumes_list:
            # Volume already in group. Remove from add_volumes.
            add_volumes_list.remove(volume['id'])

    for add_vol in add_volumes_list:
        try:
            add_vol_ref = self.db.volume_get(context, add_vol)
        except exception.VolumeNotFound:
            msg = (_("Cannot add volume %(volume_id)s to "
                     "group %(group_id)s because volume cannot be "
                     "found.") %
                   {'volume_id': add_vol,
                    'group_id': group.id})
            raise exception.InvalidVolume(reason=msg)

        # Defensive guard: volume_get raises on a missing volume, so an
        # empty ref should not occur.  (The previous trailing else-branch
        # was unreachable and would itself have crashed by reading
        # add_vol_ref['id'] from a falsy ref; use the requested uuid.)
        if not add_vol_ref:
            msg = (_("Cannot add volume %(volume_id)s to group "
                     "%(group_id)s because volume does not exist.") %
                   {'volume_id': add_vol,
                    'group_id': group.id})
            raise exception.InvalidVolume(reason=msg)

        orig_group = add_vol_ref.get('group_id', None)
        if orig_group:
            # If volume to be added is already in the group to be updated,
            # it should have been removed from the add_volumes_list in the
            # beginning of this function. If we are here, it means it is
            # in a different group.
            msg = (_("Cannot add volume %(volume_id)s to group "
                     "%(group_id)s because it is already in "
                     "group %(orig_group)s.") %
                   {'volume_id': add_vol_ref['id'],
                    'group_id': group.id,
                    'orig_group': orig_group})
            raise exception.InvalidVolume(reason=msg)

        add_vol_type_id = add_vol_ref.get('volume_type_id', None)
        if not add_vol_type_id:
            msg = (_("Cannot add volume %(volume_id)s to group "
                     "%(group_id)s because it has no volume "
                     "type.") %
                   {'volume_id': add_vol_ref['id'],
                    'group_id': group.id})
            raise exception.InvalidVolume(reason=msg)
        vol_type_ids = [v_type.id for v_type in group.volume_types]
        if add_vol_type_id not in vol_type_ids:
            msg = (_("Cannot add volume %(volume_id)s to group "
                     "%(group_id)s because volume type "
                     "%(volume_type)s is not supported by the "
                     "group.") %
                   {'volume_id': add_vol_ref['id'],
                    'group_id': group.id,
                    'volume_type': add_vol_type_id})
            raise exception.InvalidVolume(reason=msg)
        if (add_vol_ref['status'] not in
                VALID_ADD_VOL_TO_GROUP_STATUS):
            msg = (_("Cannot add volume %(volume_id)s to group "
                     "%(group_id)s because volume is in an "
                     "invalid state: %(status)s. Valid states are: "
                     "%(valid)s.") %
                   {'volume_id': add_vol_ref['id'],
                    'group_id': group.id,
                    'status': add_vol_ref['status'],
                    'valid': VALID_ADD_VOL_TO_GROUP_STATUS})
            raise exception.InvalidVolume(reason=msg)

        # group.host and add_vol_ref['host'] are in this format:
        # 'host@backend#pool'. Extract host (host@backend) before
        # doing comparison.
        vol_host = vol_utils.extract_host(add_vol_ref['host'])
        group_host = vol_utils.extract_host(group.host)
        if group_host != vol_host:
            raise exception.InvalidVolume(
                reason=_("Volume is not local to this node."))

        # Volume exists. It will be added to the group.  Ids are
        # collected in a list (not a growing string) to avoid substring
        # matches between uuids.
        added_ids.append(add_vol_ref['id'])

    return ",".join(added_ids)
def get(self, context, group_id):
    """Fetch the group with ``group_id`` after enforcing the 'get' policy."""
    result = objects.Group.get_by_id(context, group_id)
    check_policy(context, 'get', result)
    return result
def get_all(self, context, filters=None, marker=None, limit=None,
            offset=None, sort_keys=None, sort_dirs=None):
    """List groups, honoring the all_tenants filter for admins."""
    check_policy(context, 'get_all')
    filters = filters or {}
    if filters:
        LOG.debug("Searching by: %s", filters)

    if context.is_admin and 'all_tenants' in filters:
        # Admins asking for all tenants get the unscoped listing.
        del filters['all_tenants']
        return objects.GroupList.get_all(
            context, filters=filters, marker=marker, limit=limit,
            offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)

    return objects.GroupList.get_all_by_project(
        context, context.project_id, filters=filters, marker=marker,
        limit=limit, offset=offset, sort_keys=sort_keys,
        sort_dirs=sort_dirs)

View File

@ -36,3 +36,4 @@ def register_all():
__import__('cinder.objects.volume_attachment')
__import__('cinder.objects.volume_type')
__import__('cinder.objects.group_type')
__import__('cinder.objects.group')

View File

@ -112,6 +112,8 @@ OBJ_VERSIONS.add('1.7', {'Cluster': '1.0', 'ClusterList': '1.0',
'ConsistencyGroup': '1.3'})
OBJ_VERSIONS.add('1.8', {'RequestSpec': '1.0', 'VolumeProperties': '1.0'})
OBJ_VERSIONS.add('1.9', {'GroupType': '1.0', 'GroupTypeList': '1.0'})
OBJ_VERSIONS.add('1.10', {'Group': '1.0', 'GroupList': '1.0', 'Volume': '1.5',
'RequestSpec': '1.1', 'VolumeProperties': '1.1'})
class CinderObjectRegistry(base.VersionedObjectRegistry):

View File

@ -62,6 +62,24 @@ class ConsistencyGroupStatusField(BaseEnumField):
AUTO_TYPE = ConsistencyGroupStatus()
class GroupStatus(BaseCinderEnum):
    """Valid lifecycle states for a generic volume group."""
    ERROR = 'error'
    AVAILABLE = 'available'
    CREATING = 'creating'
    DELETING = 'deleting'
    DELETED = 'deleted'
    UPDATING = 'updating'
    IN_USE = 'in-use'
    ERROR_DELETING = 'error_deleting'

    ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED,
           UPDATING, IN_USE, ERROR_DELETING)
class GroupStatusField(BaseEnumField):
    """Versioned-object field constrained to GroupStatus values."""
    AUTO_TYPE = GroupStatus()
class ReplicationStatus(BaseCinderEnum):
ERROR = 'error'
ENABLED = 'enabled'

168
cinder/objects/group.py Normal file
View File

@ -0,0 +1,168 @@
# Copyright 2016 EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from cinder.objects import fields as c_fields
from oslo_versionedobjects import fields
OPTIONAL_FIELDS = ['volumes', 'volume_types']
@base.CinderObjectRegistry.register
class Group(base.CinderPersistentObject, base.CinderObject,
            base.CinderObjectDictCompat):
    """Versioned object wrapping the generic volume group DB model."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.UUIDField(),
        'user_id': fields.StringField(),
        'project_id': fields.StringField(),
        'cluster_name': fields.StringField(nullable=True),
        'host': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'name': fields.StringField(nullable=True),
        'description': fields.StringField(nullable=True),
        'group_type_id': fields.StringField(),
        'volume_type_ids': fields.ListOfStringsField(nullable=True),
        'status': c_fields.GroupStatusField(nullable=True),
        'volumes': fields.ObjectField('VolumeList', nullable=True),
        'volume_types': fields.ObjectField('VolumeTypeList',
                                           nullable=True),
    }

    @staticmethod
    def _from_db_object(context, group, db_group,
                        expected_attrs=None):
        """Fill ``group`` from a DB model row.

        The optional relationship fields ('volumes', 'volume_types') are
        only populated when named in ``expected_attrs``; otherwise they
        stay unset and are lazy-loaded via obj_load_attr.
        """
        if expected_attrs is None:
            expected_attrs = []
        for name, field in group.fields.items():
            if name in OPTIONAL_FIELDS:
                continue
            value = db_group.get(name)
            setattr(group, name, value)

        if 'volumes' in expected_attrs:
            volumes = base.obj_make_list(
                context, objects.VolumeList(context),
                objects.Volume,
                db_group['volumes'])
            group.volumes = volumes

        if 'volume_types' in expected_attrs:
            volume_types = base.obj_make_list(
                context, objects.VolumeTypeList(context),
                objects.VolumeType,
                db_group['volume_types'])
            group.volume_types = volume_types

        group._context = context
        group.obj_reset_changes()
        return group

    def create(self):
        """Persist a new group row.

        The relationship fields may not be assigned at create time; the
        DB layer builds the volume-type mappings from 'volume_type_ids'.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already_created'))
        updates = self.cinder_obj_get_changes()
        if 'volume_types' in updates:
            raise exception.ObjectActionError(
                action='create',
                reason=_('volume_types assigned'))

        if 'volumes' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('volumes assigned'))

        db_groups = db.group_create(self._context,
                                    updates)
        self._from_db_object(self._context, self, db_groups)

    def obj_load_attr(self, attrname):
        """Lazy-load an optional relationship attribute from the DB."""
        if attrname not in OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'volume_types':
            self.volume_types = objects.VolumeTypeList.get_all_by_group(
                self._context, self.id)

        if attrname == 'volumes':
            self.volumes = objects.VolumeList.get_all_by_generic_group(
                self._context, self.id)

        self.obj_reset_changes(fields=[attrname])

    def save(self):
        """Write changed scalar fields back to the DB row.

        Changes to the relationship fields cannot be saved this way.
        """
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'volume_types' in updates:
                msg = _('Cannot save volume_types changes in group object '
                        'update.')
                raise exception.ObjectActionError(
                    action='save', reason=msg)
            if 'volumes' in updates:
                msg = _('Cannot save volumes changes in group object update.')
                raise exception.ObjectActionError(
                    action='save', reason=msg)

            db.group_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def destroy(self):
        """Soft-delete the group row (runs with elevated privileges)."""
        with self.obj_as_admin():
            db.group_destroy(self._context, self.id)
@base.CinderObjectRegistry.register
class GroupList(base.ObjectListBase, base.CinderObject):
    """Versioned list of Group objects."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'objects': fields.ListOfObjectsField('Group')
    }

    # Mapping of list version -> contained Group object version.
    child_version = {
        '1.0': '1.0',
    }

    @classmethod
    def get_all(cls, context, filters=None, marker=None, limit=None,
                offset=None, sort_keys=None, sort_dirs=None):
        """Return all groups matching the filters, with pagination/sorting."""
        groups = db.group_get_all(
            context, filters=filters, marker=marker, limit=limit,
            offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
        return base.obj_make_list(context, cls(context),
                                  objects.Group,
                                  groups)

    @classmethod
    def get_all_by_project(cls, context, project_id, filters=None, marker=None,
                           limit=None, offset=None, sort_keys=None,
                           sort_dirs=None):
        """Return the groups owned by ``project_id``."""
        groups = db.group_get_all_by_project(
            context, project_id, filters=filters, marker=marker, limit=limit,
            offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
        return base.obj_make_list(context, cls(context),
                                  objects.Group,
                                  groups)

View File

@ -25,10 +25,12 @@ CONF = cfg.CONF
class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
base.CinderComparableObject):
# Version 1.0: Initial version
VERSION = '1.0'
# Version 1.1: Added group_id and group_backend
VERSION = '1.1'
fields = {
'consistencygroup_id': fields.UUIDField(nullable=True),
'group_id': fields.UUIDField(nullable=True),
'cgsnapshot_id': fields.UUIDField(nullable=True),
'image_id': fields.UUIDField(nullable=True),
'snapshot_id': fields.UUIDField(nullable=True),
@ -40,6 +42,7 @@ class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
'volume_properties': fields.ObjectField('VolumeProperties',
nullable=True),
'CG_backend': fields.StringField(nullable=True),
'group_backend': fields.StringField(nullable=True),
}
obj_extra_fields = ['resource_properties']
@ -90,7 +93,8 @@ class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
@base.CinderObjectRegistry.register
class VolumeProperties(base.CinderObject, base.CinderObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
# Version 1.1: Added group_id and group_type_id
VERSION = '1.1'
# TODO(dulek): We should add this to initially move volume_properites to
# ovo, but this should be removed as soon as possible. Most of the data
@ -105,6 +109,7 @@ class VolumeProperties(base.CinderObject, base.CinderObjectDictCompat):
'availability_zone': fields.StringField(nullable=True),
'cgsnapshot_id': fields.UUIDField(nullable=True),
'consistencygroup_id': fields.UUIDField(nullable=True),
'group_id': fields.UUIDField(nullable=True),
'display_description': fields.StringField(nullable=True),
'display_name': fields.StringField(nullable=True),
'encryption_key_id': fields.UUIDField(nullable=True),
@ -121,4 +126,5 @@ class VolumeProperties(base.CinderObject, base.CinderObjectDictCompat):
'status': fields.StringField(nullable=True),
'user_id': fields.StringField(nullable=True),
'volume_type_id': fields.UUIDField(nullable=True),
'group_type_id': fields.UUIDField(nullable=True),
}

View File

@ -57,11 +57,12 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
# Version 1.2: Added glance_metadata, consistencygroup and snapshots
# Version 1.3: Added finish_volume_migration()
# Version 1.4: Added cluster fields
VERSION = '1.4'
# Version 1.5: Added group
VERSION = '1.5'
OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
'volume_type', 'volume_attachment', 'consistencygroup',
'snapshots', 'cluster')
'snapshots', 'cluster', 'group')
fields = {
'id': fields.UUIDField(),
@ -99,6 +100,7 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
'encryption_key_id': fields.UUIDField(nullable=True),
'consistencygroup_id': fields.UUIDField(nullable=True),
'group_id': fields.UUIDField(nullable=True),
'deleted': fields.BooleanField(default=False, nullable=True),
'bootable': fields.BooleanField(default=False, nullable=True),
@ -119,6 +121,7 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
'consistencygroup': fields.ObjectField('ConsistencyGroup',
nullable=True),
'snapshots': fields.ObjectField('SnapshotList', nullable=True),
'group': fields.ObjectField('Group', nullable=True),
}
# NOTE(thangp): obj_extra_fields is used to hold properties that are not
@ -298,6 +301,12 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
db_cluster)
else:
volume.cluster = None
if 'group' in expected_attrs:
group = objects.Group(context)
group._from_db_object(context,
group,
db_volume['group'])
volume.group = group
volume._context = context
volume.obj_reset_changes()
@ -318,6 +327,9 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
if 'cluster' in updates:
raise exception.ObjectActionError(
action='create', reason=_('cluster assigned'))
if 'group' in updates:
raise exception.ObjectActionError(
action='create', reason=_('group assigned'))
db_volume = db.volume_create(self._context, updates)
self._from_db_object(self._context, self, db_volume)
@ -328,6 +340,9 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
if 'consistencygroup' in updates:
raise exception.ObjectActionError(
action='save', reason=_('consistencygroup changed'))
if 'group' in updates:
raise exception.ObjectActionError(
action='save', reason=_('group changed'))
if 'glance_metadata' in updates:
raise exception.ObjectActionError(
action='save', reason=_('glance_metadata changed'))
@ -410,6 +425,10 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
self._context, name=self.cluster_name)
else:
self.cluster = None
elif attrname == 'group':
group = objects.Group.get_by_id(
self._context, self.group_id)
self.group = group
self.obj_reset_changes(fields=[attrname])
@ -522,11 +541,21 @@ class VolumeList(base.ObjectListBase, base.CinderObject):
@classmethod
def get_all_by_group(cls, context, group_id, filters=None):
# Consistency group
volumes = db.volume_get_all_by_group(context, group_id, filters)
expected_attrs = cls._get_expected_attrs(context)
return base.obj_make_list(context, cls(context), objects.Volume,
volumes, expected_attrs=expected_attrs)
@classmethod
def get_all_by_generic_group(cls, context, group_id, filters=None):
# Generic volume group
volumes = db.volume_get_all_by_generic_group(context, group_id,
filters)
expected_attrs = cls._get_expected_attrs(context)
return base.obj_make_list(context, cls(context), objects.Volume,
volumes, expected_attrs=expected_attrs)
@classmethod
def get_all_by_project(cls, context, project_id, marker, limit,
sort_keys=None, sort_dirs=None, filters=None,

View File

@ -145,3 +145,13 @@ class VolumeTypeList(base.ObjectListBase, base.CinderObject):
types = db.qos_specs_associations_get(context, qos_id)
return base.obj_make_list(context, cls(context), objects.VolumeType,
types)
@classmethod
def get_all_by_group(cls, context, group_id):
# Generic volume group
types = volume_types.get_all_types_by_group(
context.elevated(), group_id)
expected_attrs = VolumeType._get_expected_attrs(context)
return base.obj_make_list(context, cls(context),
objects.VolumeType, types,
expected_attrs=expected_attrs)

View File

@ -45,6 +45,9 @@ quota_opts = [
cfg.IntOpt('quota_consistencygroups',
default=10,
help='Number of consistencygroups allowed per project'),
cfg.IntOpt('quota_groups',
default=10,
help='Number of groups allowed per project'),
cfg.IntOpt('quota_gigabytes',
default=1000,
help='Total amount of storage, in gigabytes, allowed '
@ -1202,5 +1205,30 @@ class CGQuotaEngine(QuotaEngine):
def register_resources(self, resources):
raise NotImplementedError(_("Cannot register resources"))
class GroupQuotaEngine(QuotaEngine):
    """Quota engine for generic volume groups."""

    @property
    def resources(self):
        """Build the mapping of all group quota resources."""
        # Only a single global quota resource exists for groups.
        specs = (('groups', '_sync_groups', 'quota_groups'),)
        result = {}
        for name, sync, flag in specs:
            res = ReservableResource(name, sync, flag)
            result[res.name] = res
        return result

    def register_resource(self, resource):
        # Group quota resources are fixed; dynamic registration is
        # intentionally unsupported.
        raise NotImplementedError(_("Cannot register resource"))

    def register_resources(self, resources):
        # Group quota resources are fixed; dynamic registration is
        # intentionally unsupported.
        raise NotImplementedError(_("Cannot register resources"))
# Module-level singleton quota engines used throughout Cinder.
QUOTAS = VolumeTypeQuotaEngine()  # per-volume-type quotas
CGQUOTAS = CGQuotaEngine()  # consistency group quotas
GROUP_QUOTAS = GroupQuotaEngine()  # generic volume group quotas

View File

@ -66,6 +66,16 @@ def group_update_db(context, group, host):
return group
def generic_group_update_db(context, group, host):
    """Set the host and the updated_at field of a group.

    NOTE: despite being called from the scheduler, this sets
    ``updated_at`` (not ``scheduled_at``) together with ``host``.

    :returns: A Group with the updated fields set properly.
    """
    group.update({'host': host, 'updated_at': timeutils.utcnow()})
    group.save()
    return group
class Scheduler(object):
"""The base class that all Scheduler classes should inherit from."""
@ -118,6 +128,15 @@ class Scheduler(object):
raise NotImplementedError(_(
"Must implement schedule_create_consistencygroup"))
    def schedule_create_group(self, context, group,
                              group_spec,
                              request_spec_list,
                              group_filter_properties,
                              filter_properties_list):
        """Schedule creation of a generic volume group.

        Must be overridden by concrete scheduler drivers for group
        scheduling to work.
        """
        raise NotImplementedError(_(
            "Must implement schedule_create_group"))
def get_pools(self, context, filters):
"""Must override schedule method for scheduler to work."""
raise NotImplementedError(_(

View File

@ -81,6 +81,28 @@ class FilterScheduler(driver.Scheduler):
self.volume_rpcapi.create_consistencygroup(context,
updated_group, host)
def schedule_create_group(self, context, group,
group_spec,
request_spec_list,
group_filter_properties,
filter_properties_list):
weighed_host = self._schedule_generic_group(
context,
group_spec,
request_spec_list,
group_filter_properties,
filter_properties_list)
if not weighed_host:
raise exception.NoValidHost(reason=_("No weighed hosts available"))
host = weighed_host.obj.host
updated_group = driver.generic_group_update_db(context, group, host)
self.volume_rpcapi.create_group(context,
updated_group, host)
def schedule_create_volume(self, context, request_spec, filter_properties):
weighed_host = self._schedule(context, request_spec,
filter_properties)
@ -407,18 +429,194 @@ class FilterScheduler(driver.Scheduler):
return weighed_hosts
def _get_weighted_candidates_generic_group(
self, context, group_spec, request_spec_list,
group_filter_properties=None,
filter_properties_list=None):
"""Finds hosts that supports the group.
Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
hosts_by_group_type = self._get_weighted_candidates_by_group_type(
context, group_spec, group_filter_properties)
weighed_hosts = []
hosts_by_vol_type = []
index = 0
for request_spec in request_spec_list:
volume_properties = request_spec['volume_properties']
# Since Cinder is using mixed filters from Oslo and it's own, which
# takes 'resource_XX' and 'volume_XX' as input respectively,
# copying 'volume_XX' to 'resource_XX' will make both filters
# happy.
resource_properties = volume_properties.copy()
volume_type = request_spec.get("volume_type", None)
resource_type = request_spec.get("volume_type", None)
request_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()
filter_properties = {}
if filter_properties_list:
filter_properties = filter_properties_list[index]
if filter_properties is None:
filter_properties = {}
self._populate_retry(filter_properties, resource_properties)
# Add group_support in extra_specs if it is not there.
# Make sure it is populated in filter_properties
# if 'group_support' not in resource_type.get(
# 'extra_specs', {}):
# resource_type['extra_specs'].update(
# group_support='<is> True')
filter_properties.update({'context': context,
'request_spec': request_spec,
'config_options': config_options,
'volume_type': volume_type,
'resource_type': resource_type})
self.populate_filter_properties(request_spec,
filter_properties)
# Find our local list of acceptable hosts by filtering and
# weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
all_hosts = self.host_manager.get_all_host_states(elevated)
if not all_hosts:
return []
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(all_hosts,
filter_properties)
if not hosts:
return []
LOG.debug("Filtered %s", hosts)
# weighted_host = WeightedHost() ... the best
# host for the job.
temp_weighed_hosts = self.host_manager.get_weighed_hosts(
hosts,
filter_properties)
if not temp_weighed_hosts:
return []
if index == 0:
hosts_by_vol_type = temp_weighed_hosts
else:
hosts_by_vol_type = self._find_valid_hosts(
hosts_by_vol_type, temp_weighed_hosts)
if not hosts_by_vol_type:
return []
index += 1
# Find hosts selected by both the group type and volume types.
weighed_hosts = self._find_valid_hosts(hosts_by_vol_type,
hosts_by_group_type)
return weighed_hosts
def _find_valid_hosts(self, host_list1, host_list2):
new_hosts = []
for host1 in host_list1:
for host2 in host_list2:
# Should schedule creation of group on backend level,
# not pool level.
if (utils.extract_host(host1.obj.host) ==
utils.extract_host(host2.obj.host)):
new_hosts.append(host1)
if not new_hosts:
return []
return new_hosts
    def _get_weighted_candidates_by_group_type(
            self, context, group_spec,
            group_filter_properties=None):
        """Finds hosts that support the group type.

        Runs the standard filter/weigh pipeline against the group type's
        spec and returns a list of hosts that meet the required specs,
        ordered by their fitness (empty list when no host qualifies).

        :param context: the request context
        :param group_spec: spec dict describing the group type request
        :param group_filter_properties: optional filter properties dict;
                                        mutated in place with the
                                        scheduling inputs
        """
        elevated = context.elevated()
        weighed_hosts = []
        volume_properties = group_spec['volume_properties']
        # Since Cinder is using mixed filters from Oslo and it's own, which
        # takes 'resource_XX' and 'volume_XX' as input respectively,
        # copying 'volume_XX' to 'resource_XX' will make both filters
        # happy.
        resource_properties = volume_properties.copy()
        group_type = group_spec.get("group_type", None)
        resource_type = group_spec.get("group_type", None)
        group_spec.update({'resource_properties': resource_properties})

        config_options = self._get_configuration_options()

        if group_filter_properties is None:
            group_filter_properties = {}
        self._populate_retry(group_filter_properties, resource_properties)
        group_filter_properties.update({'context': context,
                                        'request_spec': group_spec,
                                        'config_options': config_options,
                                        'group_type': group_type,
                                        'resource_type': resource_type})
        self.populate_filter_properties(group_spec,
                                        group_filter_properties)

        # Find our local list of acceptable hosts by filtering and
        # weighing our options. we virtually consume resources on
        # it so subsequent selections can adjust accordingly.

        # Note: remember, we are using an iterator here. So only
        # traverse this list once.
        all_hosts = self.host_manager.get_all_host_states(elevated)
        if not all_hosts:
            return []

        # Filter local hosts based on requirements ...
        hosts = self.host_manager.get_filtered_hosts(all_hosts,
                                                     group_filter_properties)

        if not hosts:
            return []

        LOG.debug("Filtered %s", hosts)

        # weighted_host = WeightedHost() ... the best
        # host for the job.
        weighed_hosts = self.host_manager.get_weighed_hosts(
            hosts,
            group_filter_properties)
        if not weighed_hosts:
            return []

        return weighed_hosts
def _schedule(self, context, request_spec, filter_properties=None):
weighed_hosts = self._get_weighted_candidates(context, request_spec,
filter_properties)
# When we get the weighed_hosts, we clear those hosts whose backend
# is not same as consistencygroup's backend.
CG_backend = request_spec.get('CG_backend')
if weighed_hosts and CG_backend:
if request_spec.get('CG_backend'):
group_backend = request_spec.get('CG_backend')
else:
group_backend = request_spec.get('group_backend')
if weighed_hosts and group_backend:
# Get host name including host@backend#pool info from
# weighed_hosts.
for host in weighed_hosts[::-1]:
backend = utils.extract_host(host.obj.host)
if backend != CG_backend:
if backend != group_backend:
weighed_hosts.remove(host)
if not weighed_hosts:
LOG.warning(_LW('No weighed hosts found for volume '
@ -437,6 +635,19 @@ class FilterScheduler(driver.Scheduler):
return None
return self._choose_top_host_group(weighed_hosts, request_spec_list)
def _schedule_generic_group(self, context, group_spec, request_spec_list,
group_filter_properties=None,
filter_properties_list=None):
weighed_hosts = self._get_weighted_candidates_generic_group(
context,
group_spec,
request_spec_list,
group_filter_properties,
filter_properties_list)
if not weighed_hosts:
return None
return self._choose_top_host_generic_group(weighed_hosts)
def _choose_top_host(self, weighed_hosts, request_spec):
top_host = weighed_hosts[0]
host_state = top_host.obj
@ -450,3 +661,9 @@ class FilterScheduler(driver.Scheduler):
host_state = top_host.obj
LOG.debug("Choosing %s", host_state.host)
return top_host
def _choose_top_host_generic_group(self, weighed_hosts):
top_host = weighed_hosts[0]
host_state = top_host.obj
LOG.debug("Choosing %s", host_state.host)
return top_host

View File

@ -124,6 +124,35 @@ class SchedulerManager(manager.Manager):
group.status = 'error'
group.save()
    def create_group(self, context, topic,
                     group,
                     group_spec=None,
                     group_filter_properties=None,
                     request_spec_list=None,
                     filter_properties_list=None):
        """Schedule creation of a generic volume group.

        On NoValidHost the group is marked 'error' and the exception is
        swallowed (a scheduling miss is an expected outcome); any other
        failure also marks the group 'error' but is re-raised to the
        RPC layer.
        """
        self._wait_for_scheduler()
        try:
            self.driver.schedule_create_group(
                context, group,
                group_spec,
                request_spec_list,
                group_filter_properties,
                filter_properties_list)
        except exception.NoValidHost:
            LOG.error(_LE("Could not find a host for group "
                          "%(group_id)s."),
                      {'group_id': group.id})
            group.status = 'error'
            group.save()
        except Exception:
            # save_and_reraise_exception re-raises after the status is
            # persisted.
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to create generic group "
                                  "%(group_id)s."),
                              {'group_id': group.id})
                group.status = 'error'
                group.save()
def create_volume(self, context, topic, volume_id, snapshot_id=None,
image_id=None, request_spec=None,
filter_properties=None, volume=None):

View File

@ -54,9 +54,10 @@ class SchedulerAPI(rpc.RPCAPI):
2.0 - Remove 1.x compatibility
2.1 - Adds support for sending objects over RPC in manage_existing()
2.2 - Sends request_spec as object in create_volume()
2.3 - Add create_group method
"""
RPC_API_VERSION = '2.2'
RPC_API_VERSION = '2.3'
TOPIC = CONF.scheduler_topic
BINARY = 'cinder-scheduler'
@ -80,6 +81,27 @@ class SchedulerAPI(rpc.RPCAPI):
request_spec_list=request_spec_p_list,
filter_properties_list=filter_properties_list)
def create_group(self, ctxt, topic, group,
group_spec=None,
request_spec_list=None,
group_filter_properties=None,
filter_properties_list=None):
version = '2.3'
cctxt = self.client.prepare(version=version)
request_spec_p_list = []
for request_spec in request_spec_list:
request_spec_p = jsonutils.to_primitive(request_spec)
request_spec_p_list.append(request_spec_p)
group_spec_p = jsonutils.to_primitive(group_spec)
return cctxt.cast(ctxt, 'create_group',
topic=topic,
group=group,
group_spec=group_spec_p,
request_spec_list=request_spec_p_list,
group_filter_properties=group_filter_properties,
filter_properties_list=filter_properties_list)
def create_volume(self, ctxt, topic, volume_id, snapshot_id=None,
image_id=None, request_spec=None,
filter_properties=None, volume=None):

View File

@ -0,0 +1,806 @@
# Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for group code.
"""
import ddt
import mock
import webob
from cinder.api.v3 import groups as v3_groups
from cinder import context
from cinder import db
from cinder import exception
import cinder.group
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils
GROUP_MICRO_VERSION = '3.13'
@ddt.ddt
class GroupsAPITestCase(test.TestCase):
"""Test Case for groups API."""
    def setUp(self):
        """Create the controller, API handle, contexts and fixtures.

        Three groups are created: group1 and group2 under the admin
        context, group3 under the plain user context.
        """
        super(GroupsAPITestCase, self).setUp()
        self.controller = v3_groups.GroupsController()
        self.group_api = cinder.group.API()
        self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
                                           auth_token=True,
                                           is_admin=True)
        self.user_ctxt = context.RequestContext(
            fake.USER_ID, fake.PROJECT_ID, auth_token=True)
        self.volume_type1 = self._create_volume_type(id=fake.VOLUME_TYPE_ID)
        self.group1 = self._create_group()
        self.group2 = self._create_group()
        # group3 is owned by the non-admin user context.
        self.group3 = self._create_group(ctxt=self.user_ctxt)
        self.addCleanup(self._cleanup)
def _cleanup(self):
self.group1.destroy()
self.group2.destroy()
self.group3.destroy()
db.volume_type_destroy(self.ctxt, self.volume_type1.id)
def _create_group(
self,
ctxt=None,
name='test_group',
description='this is a test group',
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],
availability_zone='az1',
host='fakehost',
status=fields.GroupStatus.CREATING,
**kwargs):
"""Create a group object."""
ctxt = ctxt or self.ctxt
group = objects.Group(ctxt)
group.user_id = fake.USER_ID
group.project_id = fake.PROJECT_ID
group.availability_zone = availability_zone
group.name = name
group.description = description
group.group_type_id = group_type_id
group.volume_type_ids = volume_type_ids
group.host = host
group.status = status
group.update(kwargs)
group.create()
return group
def _create_volume_type(
self,
ctxt=None,
id=fake.VOLUME_TYPE_ID,
name='test_volume_type',
description='this is a test volume type',
extra_specs={"test_key": "test_val"},
testcase_instance=None,
**kwargs):
"""Create a volume type."""
ctxt = ctxt or self.ctxt
vol_type = utils.create_volume_type(
ctxt,
testcase_instance=testcase_instance,
id=id,
name=name,
description=description,
extra_specs=extra_specs,
**kwargs)
return vol_type
    @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group')
    @mock.patch('cinder.objects.volume.VolumeList.get_all_by_generic_group')
    def test_show_group(self, mock_vol_get_all_by_group,
                        mock_vol_type_get_all_by_group):
        """Show returns the group's fields and its volume type ids."""
        volume_objs = [objects.Volume(context=self.ctxt, id=i)
                       for i in [fake.VOLUME_ID]]
        volumes = objects.VolumeList(context=self.ctxt, objects=volume_objs)
        mock_vol_get_all_by_group.return_value = volumes

        vol_type_objs = [objects.VolumeType(context=self.ctxt, id=i)
                         for i in [fake.VOLUME_TYPE_ID]]
        vol_types = objects.VolumeTypeList(context=self.ctxt,
                                           objects=vol_type_objs)
        mock_vol_type_get_all_by_group.return_value = vol_types

        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s' %
                                      (fake.PROJECT_ID, self.group1.id),
                                      version=GROUP_MICRO_VERSION)
        res_dict = self.controller.show(req, self.group1.id)

        # The response body has a single top-level 'group' key.
        self.assertEqual(1, len(res_dict))
        self.assertEqual('az1',
                         res_dict['group']['availability_zone'])
        self.assertEqual('this is a test group',
                         res_dict['group']['description'])
        self.assertEqual('test_group',
                         res_dict['group']['name'])
        self.assertEqual('creating',
                         res_dict['group']['status'])
        self.assertEqual([fake.VOLUME_TYPE_ID],
                         res_dict['group']['volume_types'])

    def test_show_group_with_group_NotFound(self):
        """Show of an unknown group id raises GroupNotFound."""
        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s' %
                                      (fake.PROJECT_ID,
                                       fake.WILL_NOT_BE_FOUND_ID),
                                      version=GROUP_MICRO_VERSION)
        self.assertRaises(exception.GroupNotFound, self.controller.show,
                          req, fake.WILL_NOT_BE_FOUND_ID)
    def test_list_groups_json(self):
        """Index lists all groups, most recently created first."""
        self.group2.group_type_id = fake.GROUP_TYPE2_ID
        self.group2.volume_type_ids = [fake.VOLUME_TYPE2_ID]
        self.group2.save()

        self.group3.group_type_id = fake.GROUP_TYPE3_ID
        self.group3.volume_type_ids = [fake.VOLUME_TYPE3_ID]
        self.group3.save()

        req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID,
                                      version=GROUP_MICRO_VERSION)
        res_dict = self.controller.index(req)

        self.assertEqual(1, len(res_dict))
        # Ordering below assumes newest-first listing (group3 was
        # created last in setUp).
        self.assertEqual(self.group3.id,
                         res_dict['groups'][0]['id'])
        self.assertEqual('test_group',
                         res_dict['groups'][0]['name'])
        self.assertEqual(self.group2.id,
                         res_dict['groups'][1]['id'])
        self.assertEqual('test_group',
                         res_dict['groups'][1]['name'])
        self.assertEqual(self.group1.id,
                         res_dict['groups'][2]['id'])
        self.assertEqual('test_group',
                         res_dict['groups'][2]['name'])
    @ddt.data(False, True)
    def test_list_groups_with_limit(self, is_detail):
        """limit=1 returns one group plus a next-page link."""
        url = '/v3/%s/groups?limit=1' % fake.PROJECT_ID
        if is_detail:
            url = '/v3/%s/groups/detail?limit=1' % fake.PROJECT_ID
        req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION)
        res_dict = self.controller.index(req)

        # Two top-level keys: 'groups' and 'group_links'.
        self.assertEqual(2, len(res_dict))
        self.assertEqual(1, len(res_dict['groups']))
        self.assertEqual(self.group3.id,
                         res_dict['groups'][0]['id'])
        next_link = (
            'http://localhost/v3/%s/groups?limit='
            '1&marker=%s' %
            (fake.PROJECT_ID, res_dict['groups'][0]['id']))
        self.assertEqual(next_link,
                         res_dict['group_links'][0]['href'])

    @ddt.data(False, True)
    def test_list_groups_with_offset(self, is_detail):
        """offset=1 skips the newest group."""
        url = '/v3/%s/groups?offset=1' % fake.PROJECT_ID
        if is_detail:
            url = '/v3/%s/groups/detail?offset=1' % fake.PROJECT_ID
        req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION)
        res_dict = self.controller.index(req)

        self.assertEqual(1, len(res_dict))
        self.assertEqual(2, len(res_dict['groups']))
        self.assertEqual(self.group2.id,
                         res_dict['groups'][0]['id'])
        self.assertEqual(self.group1.id,
                         res_dict['groups'][1]['id'])
    @ddt.data(False, True)
    def test_list_groups_with_offset_out_of_range(self, is_detail):
        """An absurdly large offset is rejected with HTTP 400."""
        url = ('/v3/%s/groups?offset=234523423455454' %
               fake.PROJECT_ID)
        if is_detail:
            url = ('/v3/%s/groups/detail?offset=234523423455454' %
                   fake.PROJECT_ID)
        req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index,
                          req)

    @ddt.data(False, True)
    def test_list_groups_with_limit_and_offset(self, is_detail):
        """limit=2&offset=1 returns the two older groups."""
        url = '/v3/%s/groups?limit=2&offset=1' % fake.PROJECT_ID
        if is_detail:
            url = ('/v3/%s/groups/detail?limit=2&offset=1' %
                   fake.PROJECT_ID)
        req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION)
        res_dict = self.controller.index(req)

        self.assertEqual(2, len(res_dict))
        self.assertEqual(2, len(res_dict['groups']))
        self.assertEqual(self.group2.id,
                         res_dict['groups'][0]['id'])
        self.assertEqual(self.group1.id,
                         res_dict['groups'][1]['id'])
    @ddt.data(False, True)
    def test_list_groups_with_filter(self, is_detail):
        """Filtering by id with all_tenants returns only that group."""
        # group3 was created with the user context in setUp.
        url = ('/v3/%s/groups?'
               'all_tenants=True&id=%s') % (fake.PROJECT_ID,
                                            self.group3.id)
        if is_detail:
            url = ('/v3/%s/groups/detail?'
                   'all_tenants=True&id=%s') % (fake.PROJECT_ID,
                                                self.group3.id)
        req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION,
                                      use_admin_context=True)
        res_dict = self.controller.index(req)

        self.assertEqual(1, len(res_dict))
        self.assertEqual(1, len(res_dict['groups']))
        self.assertEqual(self.group3.id,
                         res_dict['groups'][0]['id'])

    @ddt.data(False, True)
    def test_list_groups_with_sort(self, is_detail):
        """sort=id:asc returns the groups ordered by id ascending."""
        url = '/v3/%s/groups?sort=id:asc' % fake.PROJECT_ID
        if is_detail:
            url = ('/v3/%s/groups/detail?sort=id:asc' %
                   fake.PROJECT_ID)
        req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION)
        expect_result = [self.group1.id, self.group2.id,
                         self.group3.id]
        expect_result.sort()
        res_dict = self.controller.index(req)

        self.assertEqual(1, len(res_dict))
        self.assertEqual(3, len(res_dict['groups']))
        self.assertEqual(expect_result[0],
                         res_dict['groups'][0]['id'])
        self.assertEqual(expect_result[1],
                         res_dict['groups'][1]['id'])
        self.assertEqual(expect_result[2],
                         res_dict['groups'][2]['id'])
    @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group')
    def test_list_groups_detail_json(self, mock_vol_type_get_all_by_group):
        """Detail listing includes full fields for every group."""
        volume_type_ids = [fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID]
        vol_type_objs = [objects.VolumeType(context=self.ctxt, id=i)
                         for i in volume_type_ids]
        vol_types = objects.VolumeTypeList(context=self.ctxt,
                                           objects=vol_type_objs)
        mock_vol_type_get_all_by_group.return_value = vol_types

        self.group1.volume_type_ids = volume_type_ids
        self.group1.save()

        self.group2.volume_type_ids = volume_type_ids
        self.group2.save()

        self.group3.volume_type_ids = volume_type_ids
        self.group3.save()

        req = fakes.HTTPRequest.blank('/v3/%s/groups/detail' %
                                      fake.PROJECT_ID,
                                      version=GROUP_MICRO_VERSION)
        res_dict = self.controller.detail(req)

        self.assertEqual(1, len(res_dict))
        # Groups come back newest-first: group3, group2, group1.
        self.assertEqual('az1',
                         res_dict['groups'][0]['availability_zone'])
        self.assertEqual('this is a test group',
                         res_dict['groups'][0]['description'])
        self.assertEqual('test_group',
                         res_dict['groups'][0]['name'])
        self.assertEqual(self.group3.id,
                         res_dict['groups'][0]['id'])
        self.assertEqual('creating',
                         res_dict['groups'][0]['status'])
        self.assertEqual([fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID],
                         res_dict['groups'][0]['volume_types'])

        self.assertEqual('az1',
                         res_dict['groups'][1]['availability_zone'])
        self.assertEqual('this is a test group',
                         res_dict['groups'][1]['description'])
        self.assertEqual('test_group',
                         res_dict['groups'][1]['name'])
        self.assertEqual(self.group2.id,
                         res_dict['groups'][1]['id'])
        self.assertEqual('creating',
                         res_dict['groups'][1]['status'])
        self.assertEqual([fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID],
                         res_dict['groups'][1]['volume_types'])

        self.assertEqual('az1',
                         res_dict['groups'][2]['availability_zone'])
        self.assertEqual('this is a test group',
                         res_dict['groups'][2]['description'])
        self.assertEqual('test_group',
                         res_dict['groups'][2]['name'])
        self.assertEqual(self.group1.id,
                         res_dict['groups'][2]['id'])
        self.assertEqual('creating',
                         res_dict['groups'][2]['status'])
        self.assertEqual([fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID],
                         res_dict['groups'][2]['volume_types'])
    @mock.patch(
        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
    def test_create_group_json(self, mock_validate):
        """Create returns a body with an id and validates the name."""
        # Create volume types and group type
        vol_type = 'test'
        vol_type_id = db.volume_type_create(
            self.ctxt,
            {'name': vol_type, 'extra_specs': {}}).get('id')

        grp_type = 'grp_type'
        grp_type_id = db.group_type_create(
            self.ctxt,
            {'name': grp_type, 'group_specs': {}}).get('id')

        body = {"group": {"name": "group1",
                          "volume_types": [vol_type_id],
                          "group_type": grp_type_id,
                          "description":
                          "Group 1", }}
        req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID,
                                      version=GROUP_MICRO_VERSION)
        res_dict = self.controller.create(req, body)

        self.assertEqual(1, len(res_dict))
        self.assertIn('id', res_dict['group'])
        self.assertTrue(mock_validate.called)

        # The group must be retrievable afterwards (raises if not).
        group_id = res_dict['group']['id']
        objects.Group.get_by_id(self.ctxt, group_id)

    def test_create_group_with_no_body(self):
        """Create without a request body is rejected with HTTP 400."""
        # omit body from the request
        req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID,
                                      version=GROUP_MICRO_VERSION)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, None)
    def test_delete_group_available(self):
        """Delete of an available group returns 202 and sets deleting."""
        self.group1.status = fields.GroupStatus.AVAILABLE
        self.group1.save()
        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
                                      (fake.PROJECT_ID, self.group1.id),
                                      version=GROUP_MICRO_VERSION)
        body = {"delete": {"delete-volumes": False}}
        res_dict = self.controller.delete_group(
            req, self.group1.id, body)

        group = objects.Group.get_by_id(
            self.ctxt, self.group1.id)
        self.assertEqual(202, res_dict.status_int)
        self.assertEqual('deleting', group.status)

    def test_delete_group_available_no_delete_volumes(self):
        """Delete without delete-volumes still transitions to deleting."""
        self.group1.status = fields.GroupStatus.AVAILABLE
        self.group1.save()
        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
                                      (fake.PROJECT_ID, self.group1.id),
                                      version=GROUP_MICRO_VERSION)
        body = {"delete": {"delete-volumes": False}}
        res_dict = self.controller.delete_group(
            req, self.group1.id, body)

        group = objects.Group.get_by_id(
            self.ctxt, self.group1.id)
        self.assertEqual(202, res_dict.status_int)
        self.assertEqual(fields.GroupStatus.DELETING,
                         group.status)
    def test_delete_group_with_group_NotFound(self):
        """Delete of an unknown group id raises GroupNotFound."""
        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
                                      (fake.PROJECT_ID,
                                       fake.WILL_NOT_BE_FOUND_ID),
                                      version=GROUP_MICRO_VERSION)
        body = {"delete": {"delete-volumes": False}}
        self.assertRaises(exception.GroupNotFound,
                          self.controller.delete_group,
                          req, fake.WILL_NOT_BE_FOUND_ID, body)

    def test_delete_group_with_invalid_group(self):
        """Delete of a group still in 'creating' status is rejected."""
        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
                                      (fake.PROJECT_ID,
                                       self.group1.id),
                                      version=GROUP_MICRO_VERSION)
        body = {"delete": {"delete-volumes": False}}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.delete_group,
                          req, self.group1.id, body)

    def test_delete_group_invalid_delete_volumes(self):
        """Delete with delete-volumes=True succeeds even while creating."""
        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
                                      (fake.PROJECT_ID,
                                       self.group1.id),
                                      version=GROUP_MICRO_VERSION)
        body = {"delete": {"delete-volumes": True}}
        res_dict = self.controller.delete_group(
            req, self.group1.id, body)

        group = objects.Group.get_by_id(
            self.ctxt, self.group1.id)
        self.assertEqual(202, res_dict.status_int)
        self.assertEqual('deleting', group.status)
    def test_delete_group_no_host(self):
        """A hostless errored group is deleted directly (no RPC cast)."""
        self.group1.host = None
        self.group1.status = fields.GroupStatus.ERROR
        self.group1.save()
        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
                                      (fake.PROJECT_ID,
                                       self.group1.id),
                                      version=GROUP_MICRO_VERSION)
        body = {"delete": {"delete-volumes": True}}
        res_dict = self.controller.delete_group(
            req, self.group1.id, body)

        self.assertEqual(202, res_dict.status_int)
        # Fetch with read_deleted to confirm it went straight to DELETED.
        group = objects.Group.get_by_id(
            context.get_admin_context(read_deleted='yes'),
            self.group1.id)
        self.assertEqual(fields.GroupStatus.DELETED, group.status)
        self.assertIsNone(group.host)
    def test_create_delete_group_update_quota(self):
        """Quota is reserved on create (+1) and released on delete (-1)."""
        name = 'mygroup'
        description = 'group 1'
        grp_type = {'id': fake.GROUP_TYPE_ID, 'name': 'group_type'}
        fake_type = {'id': fake.VOLUME_TYPE_ID, 'name': 'fake_type'}
        self.mock_object(db, 'volume_types_get_by_name_or_id',
                         mock.Mock(return_value=[fake_type]))
        self.mock_object(db, 'group_type_get',
                         mock.Mock(return_value=grp_type))
        # Stub out the scheduler cast and quota calls so only the API
        # layer is exercised.
        self.mock_object(self.group_api,
                         '_cast_create_group',
                         mock.Mock())
        self.mock_object(self.group_api, 'update_quota',
                         mock.Mock())

        group = self.group_api.create(self.ctxt, name, description,
                                      grp_type['id'], [fake_type['id']])
        self.group_api.update_quota.assert_called_once_with(
            self.ctxt, group, 1)
        self.assertEqual(fields.GroupStatus.CREATING, group.status)
        self.assertIsNone(group.host)
        self.group_api.update_quota.reset_mock()
        group.status = fields.GroupStatus.ERROR
        self.group_api.delete(self.ctxt, group)

        self.group_api.update_quota.assert_called_once_with(
            self.ctxt, group, -1, self.ctxt.project_id)
        group = objects.Group.get_by_id(
            context.get_admin_context(read_deleted='yes'),
            group.id)
        self.assertEqual(fields.GroupStatus.DELETED, group.status)
    def test_delete_group_with_invalid_body(self):
        """A body without the 'delete' element is rejected with 400."""
        self.group1.status = fields.GroupStatus.AVAILABLE
        self.group1.save()
        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
                                      (fake.PROJECT_ID, self.group1.id),
                                      version=GROUP_MICRO_VERSION)
        body = {"invalid_request_element": {"delete-volumes": False}}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.delete_group,
                          req, self.group1.id, body)

    def test_delete_group_with_invalid_delete_volumes_value_in_body(self):
        """A non-boolean delete-volumes value is rejected with 400."""
        self.group1.status = fields.GroupStatus.AVAILABLE
        self.group1.save()
        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
                                      (fake.PROJECT_ID, self.group1.id),
                                      version=GROUP_MICRO_VERSION)
        body = {"delete": {"delete-volumes": "abcd"}}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.delete_group,
                          req, self.group1.id, body)

    def test_delete_group_with_empty_delete_volumes_value_in_body(self):
        """An empty delete-volumes value is rejected with 400."""
        self.group1.status = fields.GroupStatus.AVAILABLE
        self.group1.save()
        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
                                      (fake.PROJECT_ID, self.group1.id),
                                      version=GROUP_MICRO_VERSION)
        body = {"delete": {"delete-volumes": ""}}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.delete_group,
                          req, self.group1.id, body)
def test_delete_group_delete_volumes(self):
self.group1.status = fields.GroupStatus.AVAILABLE
self.group1.save()
vol = utils.create_volume(self.ctxt, group_id=self.group1.id)
req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
(fake.PROJECT_ID, self.group1.id),
version=GROUP_MICRO_VERSION)
body = {"delete": {"delete-volumes": True}}
res_dict = self.controller.delete_group(
req, self.group1.id, body)
group = objects.Group.get_by_id(
self.ctxt, self.group1.id)
self.assertEqual(202, res_dict.status_int)
self.assertEqual('deleting', group.status)
vol.destroy()
def test_delete_group_delete_volumes_with_attached_volumes(self):
self.group1.status = fields.GroupStatus.AVAILABLE
self.group1.save()
vol = utils.create_volume(self.ctxt, group_id=self.group1.id,
attach_status='attached')
req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
(fake.PROJECT_ID, self.group1.id),
version=GROUP_MICRO_VERSION)
body = {"delete": {"delete-volumes": True}}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.delete_group,
req, self.group1.id, body)
vol.destroy()
def test_delete_group_delete_volumes_with_snapshots(self):
self.group1.status = fields.GroupStatus.AVAILABLE
self.group1.save()
vol = utils.create_volume(self.ctxt, group_id=self.group1.id)
utils.create_snapshot(self.ctxt, vol.id)
req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
(fake.PROJECT_ID, self.group1.id),
version=GROUP_MICRO_VERSION)
body = {"delete": {"delete-volumes": True}}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.delete_group,
req, self.group1.id, body)
vol.destroy()
def test_delete_group_delete_volumes_with_deleted_snapshots(self):
self.group1.status = fields.GroupStatus.AVAILABLE
self.group1.save()
vol = utils.create_volume(self.ctxt, group_id=self.group1.id)
utils.create_snapshot(self.ctxt, vol.id, status='deleted',
deleted=True)
req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
(fake.PROJECT_ID, self.group1.id),
version=GROUP_MICRO_VERSION)
body = {"delete": {"delete-volumes": True}}
res_dict = self.controller.delete_group(
req, self.group1.id, body)
group = objects.Group.get_by_id(
self.ctxt, self.group1.id)
self.assertEqual(202, res_dict.status_int)
self.assertEqual('deleting', group.status)
vol.destroy()
def test_create_group_failed_no_group_type(self):
name = 'group1'
body = {"group": {"volume_types": [fake.VOLUME_TYPE_ID],
"name": name,
"description":
"Group 1", }}
req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req, body)
def test_create_group_failed_no_volume_types(self):
name = 'group1'
body = {"group": {"group_type": fake.GROUP_TYPE_ID,
"name": name,
"description":
"Group 1", }}
req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req, body)
    @mock.patch(
        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
    def test_update_group_success(self, mock_validate):
        """Happy-path update: rename, re-describe, add and remove volumes.

        Members in 'error' and 'error_deleting' states are created to verify
        that such volumes can still be removed from the group.  The request
        returns 202 and the group is left in the UPDATING state.
        """
        volume_type_id = fake.VOLUME_TYPE_ID
        self.group1.status = fields.GroupStatus.AVAILABLE
        self.group1.host = 'test_host'
        self.group1.volume_type_ids = [volume_type_id]
        self.group1.save()
        # Three member volumes slated for removal, covering the normal and
        # both error states.
        remove_volume = utils.create_volume(
            self.ctxt,
            volume_type_id=volume_type_id,
            group_id=self.group1.id)
        remove_volume2 = utils.create_volume(
            self.ctxt,
            volume_type_id=volume_type_id,
            group_id=self.group1.id,
            status='error')
        remove_volume3 = utils.create_volume(
            self.ctxt,
            volume_type_id=volume_type_id,
            group_id=self.group1.id,
            status='error_deleting')
        self.assertEqual(fields.GroupStatus.AVAILABLE,
                         self.group1.status)
        # Sanity check: all three volumes really belong to the group.
        group_volumes = db.volume_get_all_by_generic_group(
            self.ctxt.elevated(),
            self.group1.id)
        group_vol_ids = [group_vol['id'] for group_vol in group_volumes]
        self.assertIn(remove_volume.id, group_vol_ids)
        self.assertIn(remove_volume2.id, group_vol_ids)
        self.assertIn(remove_volume3.id, group_vol_ids)
        # Two ungrouped volumes of the matching type to be added.
        add_volume = utils.create_volume(
            self.ctxt,
            volume_type_id=volume_type_id)
        add_volume2 = utils.create_volume(
            self.ctxt,
            volume_type_id=volume_type_id)
        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' %
                                      (fake.PROJECT_ID, self.group1.id),
                                      version=GROUP_MICRO_VERSION)
        name = 'newgroup'
        description = 'New Group Description'
        # The API takes add/remove lists as comma-separated id strings.
        add_volumes = add_volume.id + "," + add_volume2.id
        remove_volumes = ','.join(
            [remove_volume.id, remove_volume2.id, remove_volume3.id])
        body = {"group": {"name": name,
                          "description": description,
                          "add_volumes": add_volumes,
                          "remove_volumes": remove_volumes, }}
        res_dict = self.controller.update(
            req, self.group1.id, body)
        group = objects.Group.get_by_id(
            self.ctxt, self.group1.id)
        self.assertEqual(202, res_dict.status_int)
        self.assertTrue(mock_validate.called)
        self.assertEqual(fields.GroupStatus.UPDATING,
                         group.status)
        remove_volume.destroy()
        remove_volume2.destroy()
        remove_volume3.destroy()
        add_volume.destroy()
        add_volume2.destroy()
def test_update_group_add_volume_not_found(self):
self.group1.status = fields.GroupStatus.AVAILABLE
self.group1.save()
req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' %
(fake.PROJECT_ID, self.group1.id),
version=GROUP_MICRO_VERSION)
body = {"group": {"name": None,
"description": None,
"add_volumes": "fake-volume-uuid",
"remove_volumes": None, }}
self.assertRaises(exception.InvalidVolume,
self.controller.update,
req, self.group1.id, body)
def test_update_group_remove_volume_not_found(self):
self.group1.status = fields.GroupStatus.AVAILABLE
self.group1.save()
req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' %
(fake.PROJECT_ID, self.group1.id),
version=GROUP_MICRO_VERSION)
body = {"group": {"name": None,
"description": "new description",
"add_volumes": None,
"remove_volumes": "fake-volume-uuid", }}
self.assertRaises(exception.InvalidVolume,
self.controller.update,
req, self.group1.id, body)
def test_update_group_empty_parameters(self):
self.group1.status = fields.GroupStatus.AVAILABLE
self.group1.save()
req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' %
(fake.PROJECT_ID, self.group1.id),
version=GROUP_MICRO_VERSION)
body = {"group": {"name": None,
"description": None,
"add_volumes": None,
"remove_volumes": None, }}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update,
req, self.group1.id, body)
def test_update_group_add_volume_invalid_state(self):
self.group1.status = fields.GroupStatus.AVAILABLE
self.group1.save()
add_volume = utils.create_volume(
self.ctxt,
volume_type_id=fake.VOLUME_TYPE_ID,
status='wrong_status')
req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' %
(fake.PROJECT_ID, self.group1.id),
version=GROUP_MICRO_VERSION)
add_volumes = add_volume.id
body = {"group": {"name": "group1",
"description": "",
"add_volumes": add_volumes,
"remove_volumes": None, }}
self.assertRaises(exception.InvalidVolume,
self.controller.update,
req, self.group1.id, body)
add_volume.destroy()
def test_update_group_add_volume_invalid_volume_type(self):
self.group1.status = fields.GroupStatus.AVAILABLE
self.group1.save()
wrong_type = fake.VOLUME_TYPE2_ID
add_volume = utils.create_volume(
self.ctxt,
volume_type_id=wrong_type)
req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' %
(fake.PROJECT_ID, self.group1.id),
version=GROUP_MICRO_VERSION)
add_volumes = add_volume.id
body = {"group": {"name": "group1",
"description": "",
"add_volumes": add_volumes,
"remove_volumes": None, }}
self.assertRaises(exception.InvalidVolume,
self.controller.update,
req, self.group1.id, body)
add_volume.destroy()
def test_update_group_add_volume_already_in_group(self):
self.group1.status = fields.GroupStatus.AVAILABLE
self.group1.save()
add_volume = utils.create_volume(
self.ctxt,
group_id=fake.GROUP2_ID)
req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' %
(fake.PROJECT_ID, self.group1.id),
version=GROUP_MICRO_VERSION)
add_volumes = add_volume.id
body = {"group": {"name": "group1",
"description": "",
"add_volumes": add_volumes,
"remove_volumes": None, }}
self.assertRaises(exception.InvalidVolume,
self.controller.update,
req, self.group1.id, body)
add_volume.destroy()
def test_update_group_invalid_state(self):
req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' %
(fake.PROJECT_ID, self.group1.id),
version=GROUP_MICRO_VERSION)
body = {"group": {"name": "new name",
"description": None,
"add_volumes": None,
"remove_volumes": None, }}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update,
req, self.group1.id, body)

View File

@ -11,6 +11,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import ddt
import iso8601
import mock
from oslo_config import cfg
@ -21,17 +24,23 @@ from cinder.api.v3 import volumes
from cinder import context
from cinder import db
from cinder import exception
from cinder.group import api as group_api
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import stubs
from cinder.tests.unit.api.v2 import test_volumes as v2_test_volumes
from cinder.tests.unit import fake_constants as fake
from cinder.volume import api as volume_api
from cinder.volume.api import API as vol_get
version_header_name = 'OpenStack-API-Version'
CONF = cfg.CONF
DEFAULT_AZ = "zone1:host1"
@ddt.ddt
class VolumeApiTest(test.TestCase):
def setUp(self):
super(VolumeApiTest, self).setUp()
@ -177,3 +186,178 @@ class VolumeApiTest(test.TestCase):
res_dict = self.controller.summary(req)
expected = {'volume-summary': {'total_size': 1.0, 'total_count': 1}}
self.assertEqual(expected, res_dict)
def _vol_in_request_body(self,
size=stubs.DEFAULT_VOL_SIZE,
name=stubs.DEFAULT_VOL_NAME,
description=stubs.DEFAULT_VOL_DESCRIPTION,
availability_zone=DEFAULT_AZ,
snapshot_id=None,
source_volid=None,
source_replica=None,
consistencygroup_id=None,
volume_type=None,
image_ref=None,
image_id=None,
group_id=None):
vol = {"size": size,
"name": name,
"description": description,
"availability_zone": availability_zone,
"snapshot_id": snapshot_id,
"source_volid": source_volid,
"source_replica": source_replica,
"consistencygroup_id": consistencygroup_id,
"volume_type": volume_type,
"group_id": group_id,
}
if image_id is not None:
vol['image_id'] = image_id
elif image_ref is not None:
vol['imageRef'] = image_ref
return vol
    def _expected_vol_from_controller(
            self,
            size=stubs.DEFAULT_VOL_SIZE,
            availability_zone=DEFAULT_AZ,
            description=stubs.DEFAULT_VOL_DESCRIPTION,
            name=stubs.DEFAULT_VOL_NAME,
            consistencygroup_id=None,
            source_volid=None,
            snapshot_id=None,
            metadata=None,
            attachments=None,
            volume_type=stubs.DEFAULT_VOL_TYPE,
            status=stubs.DEFAULT_VOL_STATUS,
            with_migration_status=False,
            group_id=None,
            req_version=None):
        """Build the volume view the v3 controller is expected to return.

        ``req_version`` drives microversion-dependent keys: the 'group_id'
        key is dropped when the request's max version is 3.12 or lower.
        """
        metadata = metadata or {}
        attachments = attachments or []
        volume = {'volume':
                  {'attachments': attachments,
                   'availability_zone': availability_zone,
                   'bootable': 'false',
                   'consistencygroup_id': consistencygroup_id,
                   'group_id': group_id,
                   'created_at': datetime.datetime(
                       1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()),
                   'updated_at': datetime.datetime(
                       1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()),
                   'description': description,
                   'id': stubs.DEFAULT_VOL_ID,
                   'links':
                   [{'href': 'http://localhost/v3/%s/volumes/%s' % (
                       fake.PROJECT_ID, fake.VOLUME_ID),
                     'rel': 'self'},
                    {'href': 'http://localhost/%s/volumes/%s' % (
                        fake.PROJECT_ID, fake.VOLUME_ID),
                     'rel': 'bookmark'}],
                   'metadata': metadata,
                   'name': name,
                   'replication_status': 'disabled',
                   'multiattach': False,
                   'size': size,
                   'snapshot_id': snapshot_id,
                   'source_volid': source_volid,
                   'status': status,
                   'user_id': fake.USER_ID,
                   'volume_type': volume_type,
                   'encrypted': False}}
        if with_migration_status:
            volume['volume']['migration_status'] = None
        # Remove group_id if max version is less than 3.13.
        if req_version and req_version.matches(None, "3.12"):
            volume['volume'].pop('group_id')
        return volume
def _expected_volume_api_create_kwargs(self, snapshot=None,
availability_zone=DEFAULT_AZ,
source_volume=None,
test_group=None,
req_version=None):
volume = {
'metadata': None,
'snapshot': snapshot,
'source_volume': source_volume,
'source_replica': None,
'consistencygroup': None,
'availability_zone': availability_zone,
'scheduler_hints': None,
'multiattach': False,
'group': test_group,
}
# Remove group_id if max version is less than 3.13.
if req_version and req_version.matches(None, "3.12"):
volume.pop('group')
return volume
@ddt.data('3.13', '3.12')
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_volume_create(self, max_ver, mock_validate):
self.mock_object(volume_api.API, 'get', stubs.stub_volume_get)
self.mock_object(volume_api.API, "create",
stubs.stub_volume_api_create)
self.mock_object(db.sqlalchemy.api, '_volume_type_get_full',
stubs.stub_volume_type_get)
vol = self._vol_in_request_body()
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v3/volumes')
req.api_version_request = api_version.APIVersionRequest(max_ver)
res_dict = self.controller.create(req, body)
ex = self._expected_vol_from_controller(
req_version=req.api_version_request)
self.assertEqual(ex, res_dict)
self.assertTrue(mock_validate.called)
    @ddt.data('3.13', '3.12')
    @mock.patch.object(group_api.API, 'get')
    @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full',
                       autospec=True)
    @mock.patch.object(volume_api.API, 'get_snapshot', autospec=True)
    @mock.patch.object(volume_api.API, 'create', autospec=True)
    def test_volume_creation_from_snapshot(self, max_ver, create, get_snapshot,
                                           volume_type_get, group_get):
        """Create a volume from a snapshot into a group at 3.12 and 3.13.

        Verifies both the controller response body and the exact kwargs
        forwarded to volume_api.API.create (which must include 'group'
        only at 3.13+).
        """
        create.side_effect = stubs.stub_volume_api_create
        get_snapshot.side_effect = stubs.stub_snapshot_get
        volume_type_get.side_effect = stubs.stub_volume_type_get
        fake_group = {
            'id': fake.GROUP_ID,
            'group_type_id': fake.GROUP_TYPE_ID,
            'name': 'fake_group'
        }
        group_get.return_value = fake_group
        snapshot_id = fake.SNAPSHOT_ID
        vol = self._vol_in_request_body(snapshot_id=snapshot_id,
                                        group_id=fake.GROUP_ID)
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v3/volumes')
        req.api_version_request = api_version.APIVersionRequest(max_ver)
        res_dict = self.controller.create(req, body)
        ex = self._expected_vol_from_controller(
            snapshot_id=snapshot_id,
            req_version=req.api_version_request)
        self.assertEqual(ex, res_dict)
        # The autospec'd mocks record the bound 'self' as the first
        # positional argument, hence self.controller.volume_api below.
        context = req.environ['cinder.context']
        get_snapshot.assert_called_once_with(self.controller.volume_api,
                                             context, snapshot_id)
        kwargs = self._expected_volume_api_create_kwargs(
            stubs.stub_snapshot(snapshot_id),
            test_group=fake_group,
            req_version=req.api_version_request)
        create.assert_called_once_with(self.controller.volume_api, context,
                                       vol['size'], stubs.DEFAULT_VOL_NAME,
                                       stubs.DEFAULT_VOL_DESCRIPTION, **kwargs)

View File

@ -72,3 +72,7 @@ VOLUME_TYPE4_ID = '69943076-754d-4da8-8718-0b0117e9cab1'
VOLUME_TYPE5_ID = '1c450d81-8aab-459e-b338-a6569139b835'
WILL_NOT_BE_FOUND_ID = 'ce816f65-c5aa-46d6-bd62-5272752d584a'
GROUP_TYPE_ID = '29514915-5208-46ab-9ece-1cc4688ad0c1'
GROUP_TYPE2_ID = 'f8645498-1323-47a2-9442-5c57724d2e3c'
GROUP_TYPE3_ID = '1b7915f4-b899-4510-9eff-bd67508c3334'
GROUP_ID = '9a965cc6-ee3a-468d-a721-cebb193f696f'
GROUP2_ID = '40a85639-abc3-4461-9230-b131abd8ee07'

View File

View File

@ -0,0 +1,176 @@
# Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for group API.
"""
import ddt
import mock
from cinder import context
import cinder.group
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils
@ddt.ddt
class GroupAPITestCase(test.TestCase):
    """Test Case for group API."""

    def setUp(self):
        super(GroupAPITestCase, self).setUp()
        self.group_api = cinder.group.API()
        # Admin context for most calls; a plain user context for the
        # project-scoped listing path.
        self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
                                           auth_token=True,
                                           is_admin=True)
        self.user_ctxt = context.RequestContext(
            fake.USER_ID, fake.PROJECT_ID, auth_token=True)

    @mock.patch('cinder.objects.Group.get_by_id')
    @mock.patch('cinder.group.api.check_policy')
    def test_get(self, mock_policy, mock_group_get):
        """get() returns whatever the Group object layer loads."""
        fake_group = 'fake_group'
        mock_group_get.return_value = fake_group
        grp = self.group_api.get(self.ctxt, fake.GROUP_ID)
        self.assertEqual(fake_group, grp)

    @ddt.data(True, False)
    @mock.patch('cinder.objects.GroupList.get_all')
    @mock.patch('cinder.objects.GroupList.get_all_by_project')
    @mock.patch('cinder.group.api.check_policy')
    def test_get_all(self, is_admin, mock_policy, mock_get_all_by_project,
                     mock_get_all):
        """Admins with all_tenants list everything; users list their project.

        Parameterized by ddt over the admin/non-admin cases.
        """
        self.group_api.LOG = mock.Mock()
        fake_groups = ['fake_group1', 'fake_group2']
        fake_groups_by_project = ['fake_group1']
        mock_get_all.return_value = fake_groups
        mock_get_all_by_project.return_value = fake_groups_by_project
        if is_admin:
            grps = self.group_api.get_all(self.ctxt,
                                          filters={'all_tenants': True})
            self.assertEqual(fake_groups, grps)
        else:
            grps = self.group_api.get_all(self.user_ctxt)
            self.assertEqual(fake_groups_by_project, grps)

    @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_group')
    @mock.patch('cinder.db.volume_get_all_by_generic_group')
    @mock.patch('cinder.db.volumes_update')
    @mock.patch('cinder.group.api.API._cast_create_group')
    @mock.patch('cinder.group.api.API.update_quota')
    @mock.patch('cinder.objects.Group')
    @mock.patch('cinder.db.group_type_get')
    @mock.patch('cinder.db.volume_types_get_by_name_or_id')
    @mock.patch('cinder.group.api.check_policy')
    def test_create_delete(self, mock_policy, mock_volume_types_get,
                           mock_group_type_get, mock_group,
                           mock_update_quota, mock_cast_create_group,
                           mock_volumes_update, mock_volume_get_all,
                           mock_rpc_delete_group):
        """create() returns the new group; delete() casts to the driver RPC."""
        mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}]
        mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID}
        name = "test_group"
        description = "this is a test group"
        grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[fake.VOLUME_TYPE_ID],
                                 availability_zone='nova', host=None,
                                 name=name, description=description,
                                 status=fields.GroupStatus.CREATING)
        mock_group.return_value = grp

        ret_group = self.group_api.create(self.ctxt, name, description,
                                          fake.GROUP_TYPE_ID,
                                          [fake.VOLUME_TYPE_ID],
                                          availability_zone='nova')
        self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive())

        # Pretend the scheduler placed the group, then delete it.
        ret_group.host = "test_host@fakedrv#fakepool"
        ret_group.status = fields.GroupStatus.AVAILABLE
        self.group_api.delete(self.ctxt, ret_group, delete_volumes=True)
        mock_volume_get_all.assert_called_once_with(mock.ANY, ret_group.id)
        mock_volumes_update.assert_called_once_with(self.ctxt, [])
        mock_rpc_delete_group.assert_called_once_with(self.ctxt, ret_group)

    @mock.patch('cinder.volume.rpcapi.VolumeAPI.update_group')
    @mock.patch('cinder.db.volume_get_all_by_generic_group')
    @mock.patch('cinder.group.api.API._cast_create_group')
    @mock.patch('cinder.group.api.API.update_quota')
    @mock.patch('cinder.objects.Group')
    @mock.patch('cinder.db.group_type_get')
    @mock.patch('cinder.db.volume_types_get_by_name_or_id')
    @mock.patch('cinder.group.api.check_policy')
    def test_update(self, mock_policy, mock_volume_types_get,
                    mock_group_type_get, mock_group,
                    mock_update_quota, mock_cast_create_group,
                    mock_volume_get_all, mock_rpc_update_group):
        """update() forwards add/remove volume ids to the driver RPC."""
        vol_type_dict = {'id': fake.VOLUME_TYPE_ID,
                         'name': 'fake_volume_type'}
        vol_type = objects.VolumeType(self.ctxt, **vol_type_dict)
        mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}]
        mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID}
        name = "test_group"
        description = "this is a test group"
        grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[fake.VOLUME_TYPE_ID],
                                 availability_zone='nova', host=None,
                                 name=name, description=description,
                                 status=fields.GroupStatus.CREATING)
        mock_group.return_value = grp

        ret_group = self.group_api.create(self.ctxt, name, description,
                                          fake.GROUP_TYPE_ID,
                                          [fake.VOLUME_TYPE_ID],
                                          availability_zone='nova')
        self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive())

        ret_group.volume_types = [vol_type]
        ret_group.host = "test_host@fakedrv#fakepool"
        ret_group.status = fields.GroupStatus.AVAILABLE
        ret_group.id = fake.GROUP_ID

        # vol1 is outside the group (to be added); vol2 is a member
        # (to be removed).
        vol1 = utils.create_volume(
            self.ctxt, host=ret_group.host,
            availability_zone=ret_group.availability_zone,
            volume_type_id=fake.VOLUME_TYPE_ID)

        vol2 = utils.create_volume(
            self.ctxt, host=ret_group.host,
            availability_zone=ret_group.availability_zone,
            volume_type_id=fake.VOLUME_TYPE_ID,
            group_id=fake.GROUP_ID)
        vol2_dict = {
            'id': vol2.id,
            'group_id': fake.GROUP_ID,
            'volume_type_id': fake.VOLUME_TYPE_ID,
            'availability_zone': ret_group.availability_zone,
            'host': ret_group.host,
            'status': 'available',
        }
        mock_volume_get_all.return_value = [vol2_dict]

        new_name = "new_group_name"
        new_desc = "this is a new group"
        self.group_api.update(self.ctxt, ret_group, new_name, new_desc,
                              vol1.id, vol2.id)
        mock_volume_get_all.assert_called_once_with(mock.ANY, ret_group.id)
        mock_rpc_update_group.assert_called_once_with(self.ctxt, ret_group,
                                                      add_volumes=vol1.id,
                                                      remove_volumes=vol2.id)

View File

@ -0,0 +1,207 @@
# Copyright 2016 EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import objects as test_objects
# DB-style group dict shared by the tests below; used as the return value
# of the mocked db-layer calls (group_get, group_create, ...).
fake_group = {
    'id': fake.GROUP_ID,
    'user_id': fake.USER_ID,
    'project_id': fake.PROJECT_ID,
    'host': 'fake_host',
    'availability_zone': 'fake_az',
    'name': 'fake_name',
    'description': 'fake_description',
    'group_type_id': fake.GROUP_TYPE_ID,
    'status': fields.GroupStatus.CREATING,
}
class TestGroup(test_objects.BaseObjectsTestCase):
    """Unit tests for the Group versioned object."""

    @mock.patch('cinder.db.sqlalchemy.api.group_get',
                return_value=fake_group)
    def test_get_by_id(self, group_get):
        """get_by_id loads and translates the DB row."""
        group = objects.Group.get_by_id(
            self.context, fake.GROUP_ID)
        self._compare(self, fake_group, group)
        group_get.assert_called_once_with(
            self.context, fake.GROUP_ID)

    @mock.patch('cinder.db.sqlalchemy.api.model_query')
    def test_get_by_id_no_existing_id(self, model_query):
        """A missing row surfaces as GroupNotFound."""
        model_query().filter_by().first.return_value = None
        self.assertRaises(exception.GroupNotFound,
                          objects.Group.get_by_id, self.context,
                          123)

    @mock.patch('cinder.db.group_create',
                return_value=fake_group)
    def test_create(self, group_create):
        """create() persists an id-less object and round-trips the fields."""
        fake_grp = fake_group.copy()
        del fake_grp['id']
        group = objects.Group(context=self.context,
                              **fake_grp)
        group.create()
        self._compare(self, fake_group, group)

    def test_create_with_id_except_exception(self):
        """create() on an object that already has an id must fail."""
        group = objects.Group(context=self.context, id=fake.GROUP_ID)
        self.assertRaises(exception.ObjectActionError, group.create)

    @mock.patch('cinder.db.group_update')
    def test_save(self, group_update):
        """save() pushes only the changed fields to the DB layer."""
        group = objects.Group._from_db_object(
            self.context, objects.Group(), fake_group)
        group.status = fields.GroupStatus.AVAILABLE
        group.save()
        group_update.assert_called_once_with(
            self.context,
            group.id,
            {'status': fields.GroupStatus.AVAILABLE})

    def test_save_with_volumes(self):
        """save() refuses a changed 'volumes' field."""
        group = objects.Group._from_db_object(
            self.context, objects.Group(), fake_group)
        volumes_objs = [objects.Volume(context=self.context, id=i)
                        for i in [fake.VOLUME_ID, fake.VOLUME2_ID,
                                  fake.VOLUME3_ID]]
        volumes = objects.VolumeList(objects=volumes_objs)
        group.name = 'foobar'
        group.volumes = volumes
        self.assertEqual({'name': 'foobar',
                          'volumes': volumes},
                         group.obj_get_changes())
        self.assertRaises(exception.ObjectActionError, group.save)

    @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group')
    @mock.patch('cinder.objects.volume.VolumeList.get_all_by_generic_group')
    def test_obj_load_attr(self, mock_vol_get_all_by_group,
                           mock_vol_type_get_all_by_group):
        """Accessing 'volumes' lazy-loads it through VolumeList."""
        group = objects.Group._from_db_object(
            self.context, objects.Group(), fake_group)
        # Test volumes lazy-loaded field
        volume_objs = [objects.Volume(context=self.context, id=i)
                       for i in [fake.VOLUME_ID, fake.VOLUME2_ID,
                                 fake.VOLUME3_ID]]
        volumes = objects.VolumeList(context=self.context, objects=volume_objs)
        mock_vol_get_all_by_group.return_value = volumes
        self.assertEqual(volumes, group.volumes)
        mock_vol_get_all_by_group.assert_called_once_with(self.context,
                                                          group.id)

    @mock.patch('cinder.db.group_destroy')
    def test_destroy(self, group_destroy):
        """destroy() calls the DB layer with an admin context."""
        group = objects.Group(
            context=self.context, id=fake.GROUP_ID)
        group.destroy()
        self.assertTrue(group_destroy.called)
        admin_context = group_destroy.call_args[0][0]
        self.assertTrue(admin_context.is_admin)

    @mock.patch('cinder.db.sqlalchemy.api.group_get')
    def test_refresh(self, group_get):
        """refresh() re-reads the row and picks up changed fields."""
        db_group1 = fake_group.copy()
        db_group2 = db_group1.copy()
        db_group2['description'] = 'foobar'

        # On the second group_get, return the Group with
        # an updated description
        group_get.side_effect = [db_group1, db_group2]
        group = objects.Group.get_by_id(self.context,
                                        fake.GROUP_ID)
        self._compare(self, db_group1, group)

        # description was updated, so a Group refresh should have a
        # new value for that field
        group.refresh()
        self._compare(self, db_group2, group)
        if six.PY3:
            call_bool = mock.call.__bool__()
        else:
            call_bool = mock.call.__nonzero__()
        group_get.assert_has_calls([
            mock.call(
                self.context,
                fake.GROUP_ID),
            call_bool,
            mock.call(
                self.context,
                fake.GROUP_ID)])

    def test_from_db_object_with_all_expected_attributes(self):
        """_from_db_object eagerly fills 'volumes' when requested."""
        expected_attrs = ['volumes']
        db_volumes = [fake_volume.fake_db_volume(admin_metadata={},
                                                 volume_metadata={})]
        db_group = fake_group.copy()
        db_group['volumes'] = db_volumes
        group = objects.Group._from_db_object(
            self.context, objects.Group(), db_group, expected_attrs)
        self.assertEqual(len(db_volumes), len(group.volumes))
        self._compare(self, db_volumes[0], group.volumes[0])
class TestGroupList(test_objects.BaseObjectsTestCase):
    """Unit tests for GroupList queries, with the DB layer mocked out."""

    @mock.patch('cinder.db.group_get_all',
                return_value=[fake_group])
    def test_get_all(self, group_get_all):
        grps = objects.GroupList.get_all(self.context)
        self.assertEqual(1, len(grps))
        TestGroup._compare(self, fake_group, grps[0])

    @mock.patch('cinder.db.group_get_all_by_project',
                return_value=[fake_group])
    def test_get_all_by_project(self, group_get_all_by_project):
        grps = objects.GroupList.get_all_by_project(
            self.context, self.project_id)
        self.assertEqual(1, len(grps))
        TestGroup._compare(self, fake_group, grps[0])

    @mock.patch('cinder.db.group_get_all',
                return_value=[fake_group])
    def test_get_all_with_pagination(self, group_get_all):
        # Pagination options must be forwarded to the DB call unchanged.
        search_opts = dict(filters={'id': 'fake'}, marker=None, limit=1,
                           offset=None, sort_keys='id', sort_dirs='asc')
        grps = objects.GroupList.get_all(self.context, **search_opts)
        self.assertEqual(1, len(grps))
        group_get_all.assert_called_once_with(self.context, **search_opts)
        TestGroup._compare(self, fake_group, grps[0])

    @mock.patch('cinder.db.group_get_all_by_project',
                return_value=[fake_group])
    def test_get_all_by_project_with_pagination(
            self, group_get_all_by_project):
        search_opts = dict(filters={'id': 'fake'}, marker=None, limit=1,
                           offset=None, sort_keys='id', sort_dirs='asc')
        grps = objects.GroupList.get_all_by_project(
            self.context, self.project_id, **search_opts)
        self.assertEqual(1, len(grps))
        group_get_all_by_project.assert_called_once_with(
            self.context, self.project_id, **search_opts)
        TestGroup._compare(self, fake_group, grps[0])

View File

@ -34,20 +34,22 @@ object_data = {
'ConsistencyGroupList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'QualityOfServiceSpecs': '1.0-0b212e0a86ee99092229874e03207fe8',
'QualityOfServiceSpecsList': '1.0-1b54e51ad0fc1f3a8878f5010e7e16dc',
'RequestSpec': '1.0-42685a616bd27c2a4d75cba93a81ed8c',
'RequestSpec': '1.1-b0bd1a28d191d75648901fa853e8a733',
'Service': '1.4-c7d011989d1718ca0496ccf640b42712',
'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'Snapshot': '1.1-37966f7141646eb29e9ad5298ff2ca8a',
'SnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'Volume': '1.4-cd0fc67e0ea8c9a28d9dce6b21368e01',
'Volume': '1.5-19919d8086d6a38ab9d3ab88139e70e0',
'VolumeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'VolumeAttachment': '1.0-b30dacf62b2030dd83d8a1603f1064ff',
'VolumeAttachmentList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'VolumeProperties': '1.0-42f00cf1f6c657377a3e2a7efbed0bca',
'VolumeProperties': '1.1-cadac86b2bdc11eb79d1dcea988ff9e8',
'VolumeType': '1.2-02ecb0baac87528d041f4ddd95b95579',
'VolumeTypeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'GroupType': '1.0-d4a7b272199d0b0d6fc3ceed58539d30',
'GroupTypeList': '1.0-1b54e51ad0fc1f3a8878f5010e7e16dc',
'Group': '1.0-fd0a002ba8c1388fe9d94ec20b346f0c',
'GroupList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
}
@ -84,7 +86,12 @@ class TestObjectVersions(test.TestCase):
# db model and object match.
def _check_table_matched(db_model, cls):
for column in db_model.__table__.columns:
if column.name in cls.fields:
# NOTE(xyang): Skip the comparison of the colume name
# group_type_id in table Group because group_type_id
# is in the object Group but it is stored in a different
# table in the database, not in the Group table.
if (column.name in cls.fields and
(column.name != 'group_type_id' and name != 'Group')):
self.assertEqual(
column.nullable,
cls.fields[column.name].nullable,

View File

@ -46,7 +46,8 @@ class TestVolume(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.sqlalchemy.api.model_query')
def test_get_by_id_no_existing_id(self, model_query):
pf = model_query().options().options().options().options().options()
pf = (model_query().options().options().options().options().options().
options())
pf.filter_by().first.return_value = None
self.assertRaises(exception.VolumeNotFound,
objects.Volume.get_by_id, self.context, 123)

View File

@ -118,6 +118,12 @@
"group:access_group_types_specs": "rule:admin_api",
"group:group_type_access": "rule:admin_or_owner",
"group:create" : "",
"group:delete": "",
"group:update": "",
"group:get": "",
"group:get_all": "",
"scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api",
"message:delete": "rule:admin_or_owner",

View File

@ -34,6 +34,58 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
driver_cls = filter_scheduler.FilterScheduler
def test_create_group_no_hosts(self):
# Ensure empty hosts result in NoValidHosts exception.
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project')
request_spec = {'volume_properties': {'project_id': 1,
'size': 0},
'volume_type': {'name': 'Type1',
'extra_specs': {}}}
request_spec2 = {'volume_properties': {'project_id': 1,
'size': 0},
'volume_type': {'name': 'Type2',
'extra_specs': {}}}
request_spec_list = [request_spec, request_spec2]
group_spec = {'group_type': {'name': 'GrpType'},
'volume_properties': {'project_id': 1,
'size': 0}}
self.assertRaises(exception.NoValidHost,
sched.schedule_create_group,
fake_context, 'faki-id1', group_spec,
request_spec_list, {}, [])
@mock.patch('cinder.db.service_get_all')
def test_schedule_group(self, _mock_service_get_all):
    """_schedule_generic_group() finds a host when one is available."""
    scheduler = fakes.FakeFilterScheduler()
    scheduler.host_manager = fakes.FakeHostManager()
    ctxt = context.RequestContext('user', 'project',
                                  is_admin=True)
    fakes.mock_host_manager_db_calls(_mock_service_get_all)
    extra_specs = {'capabilities:consistencygroup_support': '<is> True'}
    # One request spec per volume type carried by the group.
    request_spec_list = [
        {'volume_properties': {'project_id': 1,
                               'size': 0},
         'volume_type': {'name': type_name,
                         'extra_specs': extra_specs}}
        for type_name in ('Type1', 'Type2')
    ]
    grp_spec = {'group_type': {'name': 'GrpType'},
                'volume_properties': {'project_id': 1,
                                      'size': 0}}
    weighed_host = scheduler._schedule_generic_group(
        ctxt, grp_spec, request_spec_list, {}, [])
    self.assertIsNotNone(weighed_host.obj)
    self.assertTrue(_mock_service_get_all.called)
def test_create_consistencygroup_no_hosts(self):
# Ensure empty hosts result in NoValidHosts exception.
sched = fakes.FakeFilterScheduler()
@ -199,6 +251,37 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertIsNotNone(weighed_host.obj)
self.assertTrue(_mock_service_get_all.called)
@mock.patch('cinder.db.service_get_all')
def test_create_volume_clear_host_different_with_group(
        self, _mock_service_get_all):
    """Hosts whose backend differs from the group's are filtered out."""
    scheduler = fakes.FakeFilterScheduler()
    scheduler.host_manager = fakes.FakeHostManager()
    fakes.mock_host_manager_db_calls(_mock_service_get_all)
    ctxt = context.RequestContext('user', 'project')
    spec = {'volume_properties': {'project_id': 1,
                                  'size': 1},
            'volume_type': {'name': 'LVM_iSCSI'},
            'group_backend': 'host@lvmdriver'}
    # No fake host matches backend 'host@lvmdriver', so no host wins.
    weighed_host = scheduler._schedule(ctxt, spec, {})
    self.assertIsNone(weighed_host)
@mock.patch('cinder.db.service_get_all')
def test_create_volume_host_same_as_group(self, _mock_service_get_all):
    """A host whose backend matches the group's backend is retained."""
    scheduler = fakes.FakeFilterScheduler()
    scheduler.host_manager = fakes.FakeHostManager()
    fakes.mock_host_manager_db_calls(_mock_service_get_all)
    ctxt = context.RequestContext('user', 'project')
    spec = {'volume_properties': {'project_id': 1,
                                  'size': 1},
            'volume_type': {'name': 'LVM_iSCSI'},
            'group_backend': 'host1'}
    weighed_host = scheduler._schedule(ctxt, spec, {})
    self.assertEqual('host1#lvm1', weighed_host.obj.host)
@mock.patch('cinder.db.service_get_all')
def test_create_volume_clear_host_different_with_cg(self,
_mock_service_get_all):

View File

@ -156,3 +156,16 @@ class SchedulerRpcAPITestCase(test.TestCase):
rpc_method='call',
filters=None,
version='2.0')
def test_create_group(self):
    # Verify create_group is a fire-and-forget 'cast' to the scheduler
    # at RPC API version 2.3 with all group scheduling arguments.
    self._test_scheduler_api('create_group',
                             rpc_method='cast',
                             topic='topic',
                             group='group',
                             group_spec='group_spec_p',
                             request_spec_list=['fake_request_spec_list'],
                             group_filter_properties=
                             'fake_group_filter_properties',
                             filter_properties_list=
                             ['fake_filter_properties_list'],
                             version='2.3')

View File

@ -941,6 +941,68 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
self.assertIsInstance(type_projects.c.project_id.type,
self.VARCHAR_TYPE)
def _check_078(self, engine, data):
    """Test adding groups tables."""
    conn = engine.connect()

    self.assertTrue(engine.dialect.has_table(conn, "groups"))
    groups = db_utils.get_table(engine, 'groups')
    # Column name -> expected SQLAlchemy type for the new groups table.
    groups_columns = (('id', self.VARCHAR_TYPE),
                      ('name', self.VARCHAR_TYPE),
                      ('description', self.VARCHAR_TYPE),
                      ('created_at', self.TIME_TYPE),
                      ('updated_at', self.TIME_TYPE),
                      ('deleted_at', self.TIME_TYPE),
                      ('deleted', self.BOOL_TYPE),
                      ('user_id', self.VARCHAR_TYPE),
                      ('project_id', self.VARCHAR_TYPE),
                      ('host', self.VARCHAR_TYPE),
                      ('availability_zone', self.VARCHAR_TYPE),
                      ('group_type_id', self.VARCHAR_TYPE),
                      ('status', self.VARCHAR_TYPE))
    for column, expected_type in groups_columns:
        self.assertIsInstance(groups.c[column].type, expected_type)

    self.assertTrue(engine.dialect.has_table(conn,
                                             "group_volume_type_mapping"))
    mapping = db_utils.get_table(engine, 'group_volume_type_mapping')
    mapping_columns = (('id', self.INTEGER_TYPE),
                       ('created_at', self.TIME_TYPE),
                       ('updated_at', self.TIME_TYPE),
                       ('deleted_at', self.TIME_TYPE),
                       ('deleted', self.BOOL_TYPE),
                       ('volume_type_id', self.VARCHAR_TYPE),
                       ('group_id', self.VARCHAR_TYPE))
    for column, expected_type in mapping_columns:
        self.assertIsInstance(mapping.c[column].type, expected_type)

    # The volumes table gained a group_id column.
    volumes = db_utils.get_table(engine, 'volumes')
    self.assertIsInstance(volumes.c.group_id.type, self.VARCHAR_TYPE)

    # Exactly one default quota-class row for 'groups' was seeded.
    quota_classes = db_utils.get_table(engine, 'quota_classes')
    rows = (quota_classes.count()
            .where(quota_classes.c.resource == 'groups')
            .execute().scalar())
    self.assertEqual(1, rows)
def test_walk_versions(self):
self.walk_versions(False, False)

View File

@ -86,6 +86,12 @@ class VolumeRpcAPITestCase(test.TestCase):
host='fakehost@fakedrv#fakepool',
source_cgid=source_group.id)
generic_group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
group_type_id='group_type1',
host='fakehost@fakedrv#fakepool')
group = objects.ConsistencyGroup.get_by_id(self.context, group.id)
group2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
cgsnapshot = objects.CGSnapshot.get_by_id(self.context, cgsnapshot.id)
@ -99,6 +105,7 @@ class VolumeRpcAPITestCase(test.TestCase):
self.fake_src_cg = jsonutils.to_primitive(source_group)
self.fake_cgsnap = cgsnapshot
self.fake_backup_obj = fake_backup.fake_backup_obj(self.context)
self.fake_group = generic_group
def test_serialized_volume_has_id(self):
self.assertIn('id', self.fake_volume)
@ -223,6 +230,69 @@ class VolumeRpcAPITestCase(test.TestCase):
else:
self.assertEqual(expected_msg[kwarg], value)
def _test_group_api(self, method, rpc_method, **kwargs):
    """Invoke a group method on the volume rpcapi and verify the RPC.

    Stubs out the RPC client's prepare() and cast/call, invokes
    *method* with **kwargs, then checks the captured target (server,
    topic, version) and message payload against what was passed in.
    """
    ctxt = context.RequestContext('fake_user', 'fake_project')

    if 'rpcapi_class' in kwargs:
        rpcapi_class = kwargs['rpcapi_class']
        del kwargs['rpcapi_class']
    else:
        rpcapi_class = volume_rpcapi.VolumeAPI
    rpcapi = rpcapi_class()
    # Only 'call' returns a value; 'cast' is fire-and-forget.
    expected_retval = 'foo' if method == 'call' else None

    target = {
        "version": kwargs.pop('version', rpcapi.RPC_API_VERSION)
    }

    if 'request_spec' in kwargs:
        spec = jsonutils.to_primitive(kwargs['request_spec'])
        kwargs['request_spec'] = spec

    expected_msg = copy.deepcopy(kwargs)
    # 'host' routes the RPC but is not part of the message payload.
    if 'host' in expected_msg:
        del expected_msg['host']

    # RPC server is the explicit host kwarg, or the group's host.
    if 'host' in kwargs:
        host = kwargs['host']
    elif 'group' in kwargs:
        host = kwargs['group']['host']

    target['server'] = utils.extract_host(host)
    target['topic'] = '%s.%s' % (CONF.volume_topic, host)

    self.fake_args = None
    self.fake_kwargs = None

    def _fake_prepare_method(*args, **kwds):
        # Every kwarg passed to prepare() must match the expected target.
        for kwd in kwds:
            self.assertEqual(kwds[kwd], target[kwd])
        return rpcapi.client

    def _fake_rpc_method(*args, **kwargs):
        # Capture the outgoing message for inspection below.
        self.fake_args = args
        self.fake_kwargs = kwargs
        if expected_retval:
            return expected_retval

    self.stubs.Set(rpcapi.client, "prepare", _fake_prepare_method)
    self.stubs.Set(rpcapi.client, rpc_method, _fake_rpc_method)

    retval = getattr(rpcapi, method)(ctxt, **kwargs)
    self.assertEqual(expected_retval, retval)

    expected_args = [ctxt, method]
    for arg, expected_arg in zip(self.fake_args, expected_args):
        self.assertEqual(expected_arg, arg)

    for kwarg, value in self.fake_kwargs.items():
        if isinstance(value, objects.Group):
            # Versioned objects are compared via their primitive form.
            expected_group = expected_msg[kwarg].obj_to_primitive()
            group = value.obj_to_primitive()
            self.assertEqual(expected_group, group)
        else:
            self.assertEqual(expected_msg[kwarg], value)
def test_create_consistencygroup(self):
self._test_volume_api('create_consistencygroup', rpc_method='cast',
group=self.fake_cg, host='fake_host1',
@ -524,3 +594,17 @@ class VolumeRpcAPITestCase(test.TestCase):
rpc_method='call',
volume=self.fake_volume_obj,
version='2.0')
def test_create_group(self):
    # create_group is cast to the volume host at RPC version 2.5.
    self._test_group_api('create_group', rpc_method='cast',
                         group=self.fake_group, host='fake_host1',
                         version='2.5')
def test_delete_group(self):
    # delete_group is cast to the group's own host at RPC version 2.5.
    self._test_group_api('delete_group', rpc_method='cast',
                         group=self.fake_group, version='2.5')
def test_update_group(self):
    # update_group carries the add/remove volume id lists in the message.
    self._test_group_api('update_group', rpc_method='cast',
                         group=self.fake_group, add_volumes=['vol1'],
                         remove_volumes=['vol2'], version='2.5')

View File

@ -47,6 +47,7 @@ def create_volume(ctxt,
replication_extended_status=None,
replication_driver_data=None,
consistencygroup_id=None,
group_id=None,
previous_status=None,
testcase_instance=None,
**kwargs):
@ -65,6 +66,8 @@ def create_volume(ctxt,
vol['availability_zone'] = availability_zone
if consistencygroup_id:
vol['consistencygroup_id'] = consistencygroup_id
if group_id:
vol['group_id'] = group_id
if volume_type_id:
vol['volume_type_id'] = volume_type_id
for key in kwargs:
@ -166,6 +169,38 @@ def create_consistencygroup(ctxt,
return cg
def create_group(ctxt,
                 host='test_host@fakedrv#fakepool',
                 name='test_group',
                 description='this is a test group',
                 status=fields.GroupStatus.AVAILABLE,
                 availability_zone='fake_az',
                 group_type_id=None,
                 volume_type_ids=None,
                 **kwargs):
    """Create a group object in the DB."""
    group = objects.Group(ctxt)
    group.user_id = ctxt.user_id or fake.USER_ID
    group.project_id = ctxt.project_id or fake.PROJECT_ID
    group.host = host
    group.name = name
    group.description = description
    group.status = status
    group.availability_zone = availability_zone
    if group_type_id:
        group.group_type_id = group_type_id
    if volume_type_ids:
        group.volume_type_ids = volume_type_ids
    # An explicit 'id' cannot be assigned before create(); apply it
    # afterwards with a direct DB update and re-fetch the object.
    requested_id = kwargs.pop('id', None)
    group.update(kwargs)
    group.create()
    if requested_id and requested_id != group.id:
        db.group_update(ctxt, group.id, {'id': requested_id})
        group = objects.Group.get_by_id(ctxt, requested_id)
    return group
def create_cgsnapshot(ctxt,
consistencygroup_id,
name='test_cgsnapshot',

View File

@ -70,7 +70,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
'image_id': None,
'source_replicaid': None,
'consistencygroup_id': None,
'cgsnapshot_id': None}
'cgsnapshot_id': None,
'group_id': None, }
# Fake objects assert specs
task = create_volume.VolumeCastTask(
@ -87,7 +88,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
'image_id': 4,
'source_replicaid': 5,
'consistencygroup_id': 5,
'cgsnapshot_id': None}
'cgsnapshot_id': None,
'group_id': None, }
# Fake objects assert specs
task = create_volume.VolumeCastTask(
@ -135,7 +137,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None)
cgsnapshot=None,
group=None)
@mock.patch('cinder.volume.volume_types.is_encrypted')
@mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
@ -176,7 +179,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None)
cgsnapshot=None,
group=None)
expected_result = {'size': 1,
'snapshot_id': None,
'source_volid': None,
@ -187,7 +191,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
'qos_specs': None,
'source_replicaid': None,
'consistencygroup_id': None,
'cgsnapshot_id': None, }
'cgsnapshot_id': None,
'group_id': None, }
self.assertEqual(expected_result, result)
@mock.patch('cinder.volume.volume_types.is_encrypted')
@ -230,7 +235,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None)
cgsnapshot=None,
group=None)
@mock.patch('cinder.volume.volume_types.is_encrypted')
@mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
@ -273,7 +279,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None)
cgsnapshot=None,
group=None)
expected_result = {'size': 1,
'snapshot_id': None,
'source_volid': None,
@ -284,7 +291,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
'qos_specs': None,
'source_replicaid': None,
'consistencygroup_id': None,
'cgsnapshot_id': None, }
'cgsnapshot_id': None,
'group_id': None, }
self.assertEqual(expected_result, result)
@mock.patch('cinder.volume.volume_types.is_encrypted')
@ -327,7 +335,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None)
cgsnapshot=None,
group=None)
expected_result = {'size': 1,
'snapshot_id': None,
'source_volid': None,
@ -338,7 +347,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
'qos_specs': {'fake_key': 'fake'},
'source_replicaid': None,
'consistencygroup_id': None,
'cgsnapshot_id': None, }
'cgsnapshot_id': None,
'group_id': None, }
self.assertEqual(expected_result, result)
@mock.patch('cinder.volume.volume_types.is_encrypted')
@ -388,7 +398,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None)
cgsnapshot=None,
group=None)
expected_result = {'size': 1,
'snapshot_id': None,
'source_volid': None,
@ -399,7 +410,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
'qos_specs': None,
'source_replicaid': None,
'consistencygroup_id': None,
'cgsnapshot_id': None, }
'cgsnapshot_id': None,
'group_id': None, }
self.assertEqual(expected_result, result)
@mock.patch('cinder.db.volume_type_get_by_name')
@ -450,7 +462,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None)
cgsnapshot=None,
group=None)
expected_result = {'size': 1,
'snapshot_id': None,
'source_volid': None,
@ -461,7 +474,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
'qos_specs': None,
'source_replicaid': None,
'consistencygroup_id': None,
'cgsnapshot_id': None, }
'cgsnapshot_id': None,
'group_id': None, }
self.assertEqual(expected_result, result)
@mock.patch('cinder.db.volume_type_get_by_name')
@ -511,7 +525,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None)
cgsnapshot=None,
group=None)
expected_result = {'size': 1,
'snapshot_id': None,
'source_volid': None,
@ -522,7 +537,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
'qos_specs': None,
'source_replicaid': None,
'consistencygroup_id': None,
'cgsnapshot_id': None, }
'cgsnapshot_id': None,
'group_id': None, }
self.assertEqual(expected_result, result)
@mock.patch('cinder.db.volume_type_get_by_name')
@ -570,7 +586,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None)
cgsnapshot=None,
group=None)
class CreateVolumeFlowManagerTestCase(test.TestCase):

View File

@ -210,7 +210,8 @@ class API(base.Base):
availability_zone=None, source_volume=None,
scheduler_hints=None,
source_replica=None, consistencygroup=None,
cgsnapshot=None, multiattach=False, source_cg=None):
cgsnapshot=None, multiattach=False, source_cg=None,
group=None):
check_policy(context, 'create')
@ -242,6 +243,18 @@ class API(base.Base):
"group).") % volume_type
raise exception.InvalidInput(reason=msg)
if group:
if not volume_type:
msg = _("volume_type must be provided when creating "
"a volume in a group.")
raise exception.InvalidInput(reason=msg)
vol_type_ids = [v_type.id for v_type in group.volume_types]
if volume_type.get('id') not in vol_type_ids:
msg = _("Invalid volume_type provided: %s (requested "
"type must be supported by this "
"group).") % volume_type
raise exception.InvalidInput(reason=msg)
if volume_type and 'extra_specs' not in volume_type:
extra_specs = volume_types.get_volume_type_extra_specs(
volume_type['id'])
@ -302,6 +315,7 @@ class API(base.Base):
'consistencygroup': consistencygroup,
'cgsnapshot': cgsnapshot,
'multiattach': multiattach,
'group': group,
}
try:
sched_rpcapi = (self.scheduler_rpcapi if (not cgsnapshot and
@ -370,7 +384,8 @@ class API(base.Base):
# Build required conditions for conditional update
expected = {'attach_status': db.Not('attached'),
'migration_status': self.AVAILABLE_MIGRATION_STATUS,
'consistencygroup_id': None}
'consistencygroup_id': None,
'group_id': None}
# If not force deleting we have status conditions
if not force:
@ -391,7 +406,7 @@ class API(base.Base):
status = utils.build_or_str(expected.get('status'),
_('status must be %s and'))
msg = _('Volume %s must not be migrating, attached, belong to a '
'consistency group or have snapshots.') % status
'group or have snapshots.') % status
LOG.info(msg)
raise exception.InvalidVolume(reason=msg)
@ -1293,6 +1308,7 @@ class API(base.Base):
'migration_status': self.AVAILABLE_MIGRATION_STATUS,
'replication_status': (None, 'disabled'),
'consistencygroup_id': (None, ''),
'group_id': (None, ''),
'host': db.Not(host)}
filters = [~db.volume_has_snapshots_filter()]
@ -1316,8 +1332,8 @@ class API(base.Base):
if not result:
msg = _('Volume %s status must be available or in-use, must not '
'be migrating, have snapshots, be replicated, be part of '
'a consistency group and destination host must be '
'different than the current host') % {'vol_id': volume.id}
'a group and destination host must be different than the '
'current host') % {'vol_id': volume.id}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
@ -1450,6 +1466,7 @@ class API(base.Base):
expected = {'status': ('available', 'in-use'),
'migration_status': self.AVAILABLE_MIGRATION_STATUS,
'consistencygroup_id': (None, ''),
'group_id': (None, ''),
'volume_type_id': db.Not(vol_type_id)}
# We don't support changing encryption requirements yet
@ -1465,9 +1482,9 @@ class API(base.Base):
if not volume.conditional_update(updates, expected, filters):
msg = _('Retype needs volume to be in available or in-use state, '
'have same encryption requirements, not be part of an '
'active migration or a consistency group, requested type '
'has to be different that the one from the volume, and '
'for in-use volumes front-end qos specs cannot change.')
'active migration or a group, requested type has to be '
'different that the one from the volume, and for in-use '
'volumes front-end qos specs cannot change.')
LOG.error(msg)
QUOTAS.rollback(context, reservations + old_reservations,
project_id=volume.project_id)

View File

@ -1680,6 +1680,99 @@ class BaseVD(object):
"""Old replication update method, deprecate."""
raise NotImplementedError()
def create_group(self, context, group):
    """Creates a group.

    :param context: the context of the caller.
    :param group: the dictionary of the group to be created.
    :returns: model_update

    model_update will be in this format: {'status': xxx, ......}.

    If the status in model_update is 'error', the manager will throw
    an exception and it will be caught in the try-except block in the
    manager. If the driver throws an exception, the manager will also
    catch it in the try-except block. The group status in the db will
    be changed to 'error'.

    For a successful operation, the driver can either build the
    model_update and return it or return None. The group status will
    be set to 'available'.
    """
    raise NotImplementedError()
def delete_group(self, context, group, volumes):
    """Deletes a group.

    :param context: the context of the caller.
    :param group: the dictionary of the group to be deleted.
    :param volumes: a list of volume dictionaries in the group.
    :returns: model_update, volumes_model_update

    param volumes is retrieved directly from the db. It is a list of
    cinder.db.sqlalchemy.models.Volume to be precise. It cannot be
    assigned to volumes_model_update. volumes_model_update is a list of
    dictionaries. It has to be built by the driver. An entry will be
    in this format: {'id': xxx, 'status': xxx, ......}. model_update
    will be in this format: {'status': xxx, ......}.

    The driver should populate volumes_model_update and model_update
    and return them.

    The manager will check volumes_model_update and update db accordingly
    for each volume. If the driver successfully deleted some volumes
    but failed to delete others, it should set statuses of the volumes
    accordingly so that the manager can update db correctly.

    If the status in any entry of volumes_model_update is 'error_deleting'
    or 'error', the status in model_update will be set to the same if it
    is not already 'error_deleting' or 'error'.

    If the status in model_update is 'error_deleting' or 'error', the
    manager will raise an exception and the status of the group will be
    set to 'error' in the db. If volumes_model_update is not returned by
    the driver, the manager will set the status of every volume in the
    group to 'error' in the except block.

    If the driver raises an exception during the operation, it will be
    caught by the try-except block in the manager. The statuses of the
    group and all volumes in it will be set to 'error'.

    For a successful operation, the driver can either build the
    model_update and volumes_model_update and return them or
    return None, None. The statuses of the group and all volumes
    will be set to 'deleted' after the manager deletes them from db.
    """
    raise NotImplementedError()
def update_group(self, context, group,
                 add_volumes=None, remove_volumes=None):
    """Updates a group.

    :param context: the context of the caller.
    :param group: the dictionary of the group to be updated.
    :param add_volumes: a list of volume dictionaries to be added.
    :param remove_volumes: a list of volume dictionaries to be removed.
    :returns: model_update, add_volumes_update, remove_volumes_update

    model_update is a dictionary that the driver wants the manager
    to update upon a successful return. If None is returned, the manager
    will set the status to 'available'.

    add_volumes_update and remove_volumes_update are lists of dictionaries
    that the driver wants the manager to update upon a successful return.
    Note that each entry requires a {'id': xxx} so that the correct
    volume entry can be updated. If None is returned, the volume will
    remain its original status. Also note that you cannot directly
    assign add_volumes to add_volumes_update as add_volumes is a list of
    cinder.db.sqlalchemy.models.Volume objects and cannot be used for
    db update directly. Same with remove_volumes.

    If the driver throws an exception, the status of the group as well as
    those of the volumes to be added/removed will be set to 'error'.
    """
    raise NotImplementedError()
@six.add_metaclass(abc.ABCMeta)
class LocalVD(object):
@ -2083,8 +2176,8 @@ class ReplicaVD(object):
return
class VolumeDriver(ConsistencyGroupVD, TransferVD, ManageableVD, ExtendVD,
CloneableImageVD, ManageableSnapshotsVD,
class VolumeDriver(ConsistencyGroupVD, TransferVD, ManageableVD,
ExtendVD, CloneableImageVD, ManageableSnapshotsVD,
SnapshotVD, ReplicaVD, LocalVD, MigrateVD, BaseVD):
"""This class will be deprecated soon.

View File

@ -47,6 +47,7 @@ SRC_VOL_PROCEED_STATUS = ('available', 'in-use',)
REPLICA_PROCEED_STATUS = ('active', 'active-stopped',)
CG_PROCEED_STATUS = ('available', 'creating',)
CGSNAPSHOT_PROCEED_STATUS = ('available',)
GROUP_PROCEED_STATUS = ('available', 'creating',)
class ExtractVolumeRequestTask(flow_utils.CinderTask):
@ -67,7 +68,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
'source_volid', 'volume_type', 'volume_type_id',
'encryption_key_id', 'source_replicaid',
'consistencygroup_id', 'cgsnapshot_id',
'qos_specs'])
'qos_specs', 'group_id'])
def __init__(self, image_service, availability_zones, **kwargs):
super(ExtractVolumeRequestTask, self).__init__(addons=[ACTION],
@ -115,6 +116,11 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
exception.InvalidConsistencyGroup,
'consistencygroup')
def _extract_group(self, group):
    # Delegate to the shared resource extractor with the statuses a
    # group may be in ('available', 'creating') for volume creation;
    # presumably raises InvalidGroup otherwise — verify against
    # _extract_resource.
    return self._extract_resource(group, (GROUP_PROCEED_STATUS,),
                                  exception.InvalidGroup,
                                  'group')
def _extract_cgsnapshot(self, cgsnapshot):
return self._extract_resource(cgsnapshot, (CGSNAPSHOT_PROCEED_STATUS,),
exception.InvalidCgSnapshot,
@ -269,7 +275,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
return volume_type
def _extract_availability_zone(self, availability_zone, snapshot,
source_volume):
source_volume, group):
"""Extracts and returns a validated availability zone.
This function will extract the availability zone (if not provided) from
@ -278,6 +284,14 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
the validated availability zone.
"""
# If the volume will be created in a group, it should be placed in
# in same availability zone as the group.
if group:
try:
availability_zone = group['availability_zone']
except (TypeError, KeyError):
pass
# Try to extract the availability zone from the corresponding snapshot
# or source volume if either is valid so that we can be in the same
# availability zone as the source.
@ -389,7 +403,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
def execute(self, context, size, snapshot, image_id, source_volume,
availability_zone, volume_type, metadata, key_manager,
source_replica, consistencygroup, cgsnapshot):
source_replica, consistencygroup, cgsnapshot, group):
utils.check_exclusive_options(snapshot=snapshot,
imageRef=image_id,
@ -404,12 +418,14 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
size = self._extract_size(size, source_volume, snapshot)
consistencygroup_id = self._extract_consistencygroup(consistencygroup)
cgsnapshot_id = self._extract_cgsnapshot(cgsnapshot)
group_id = self._extract_group(group)
self._check_image_metadata(context, image_id, size)
availability_zone = self._extract_availability_zone(availability_zone,
snapshot,
source_volume)
source_volume,
group)
# TODO(joel-coffman): This special handling of snapshots to ensure that
# their volume type matches the source volume is too convoluted. We
@ -467,6 +483,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
'source_replicaid': source_replicaid,
'consistencygroup_id': consistencygroup_id,
'cgsnapshot_id': cgsnapshot_id,
'group_id': group_id,
}
@ -483,7 +500,8 @@ class EntryCreateTask(flow_utils.CinderTask):
'name', 'reservations', 'size', 'snapshot_id',
'source_volid', 'volume_type_id', 'encryption_key_id',
'source_replicaid', 'consistencygroup_id',
'cgsnapshot_id', 'multiattach', 'qos_specs']
'cgsnapshot_id', 'multiattach', 'qos_specs',
'group_id', ]
super(EntryCreateTask, self).__init__(addons=[ACTION],
requires=requires)
self.db = db
@ -687,7 +705,7 @@ class VolumeCastTask(flow_utils.CinderTask):
requires = ['image_id', 'scheduler_hints', 'snapshot_id',
'source_volid', 'volume_id', 'volume', 'volume_type',
'volume_properties', 'source_replicaid',
'consistencygroup_id', 'cgsnapshot_id', ]
'consistencygroup_id', 'cgsnapshot_id', 'group_id', ]
super(VolumeCastTask, self).__init__(addons=[ACTION],
requires=requires)
self.volume_rpcapi = volume_rpcapi
@ -704,12 +722,21 @@ class VolumeCastTask(flow_utils.CinderTask):
cgroup_id = request_spec['consistencygroup_id']
host = None
cgsnapshot_id = request_spec['cgsnapshot_id']
group_id = request_spec['group_id']
if cgroup_id:
# If cgroup_id existed, we should cast volume to the scheduler
# to choose a proper pool whose backend is same as CG's backend.
cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id)
request_spec['CG_backend'] = vol_utils.extract_host(cgroup.host)
elif group_id:
# If group_id exists, we should cast volume to the scheduler
# to choose a proper pool whose backend is same as group's backend.
group = objects.Group.get_by_id(context, group_id)
# FIXME(wanghao): group_backend got added before request_spec was
# converted to versioned objects. We should make sure that this
# will be handled by object version translations once we add
# RequestSpec object.
request_spec['group_backend'] = vol_utils.extract_host(group.host)
elif snapshot_id and CONF.snapshot_same_host:
# NOTE(Rongze Zhu): A simple solution for bug 1008866.
#

View File

@ -83,14 +83,23 @@ LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_REMOVE_VOL_FROM_GROUP_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
VALID_ADD_VOL_TO_GROUP_STATUS = (
'available',
'in-use')
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
@ -170,7 +179,7 @@ class VolumeManager(manager.SchedulerDependentManager):
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment'}
'consistencygroup', 'volume_attachment', 'group'}
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
@ -2028,6 +2037,25 @@ class VolumeManager(manager.SchedulerDependentManager):
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_usage(self,
                              context,
                              group,
                              event_suffix,
                              volumes=None,
                              extra_usage_info=None):
    """Emit a usage notification for a group and each of its volumes."""
    vol_utils.notify_about_group_usage(
        context, group, event_suffix,
        extra_usage_info=extra_usage_info, host=self.host)

    # When the caller did not supply the volumes, look them up.
    volumes = volumes or self.db.volume_get_all_by_generic_group(
        context, group.id)
    for volume in volumes:
        vol_utils.notify_about_volume_usage(
            context, volume, event_suffix,
            extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
@ -2431,27 +2459,46 @@ class VolumeManager(manager.SchedulerDependentManager):
def create_consistencygroup(self, context, group):
"""Creates the consistency group."""
return self._create_group(context, group, False)
def create_group(self, context, group):
    """Creates the group."""
    # Delegates to the shared _create_group path (also used by
    # create_consistencygroup, which passes is_generic_group=False).
    return self._create_group(context, group)
def _create_group(self, context, group, is_generic_group=True):
context = context.elevated()
status = fields.ConsistencyGroupStatus.AVAILABLE
status = fields.GroupStatus.AVAILABLE
model_update = None
self._notify_about_consistencygroup_usage(
context, group, "create.start")
if is_generic_group:
self._notify_about_group_usage(
context, group, "create.start")
else:
self._notify_about_consistencygroup_usage(
context, group, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Consistency group %s: creating"), group.name)
model_update = self.driver.create_consistencygroup(context,
group)
LOG.info(_LI("Group %s: creating"), group.name)
if is_generic_group:
try:
model_update = self.driver.create_group(context,
group)
except NotImplementedError:
model_update = self._create_group_generic(context,
group)
else:
model_update = self.driver.create_consistencygroup(context,
group)
if model_update:
if (model_update['status'] ==
fields.ConsistencyGroupStatus.ERROR):
msg = (_('Create consistency group failed.'))
fields.GroupStatus.ERROR):
msg = (_('Create group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
@ -2459,22 +2506,26 @@ class VolumeManager(manager.SchedulerDependentManager):
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.ConsistencyGroupStatus.ERROR
group.status = fields.GroupStatus.ERROR
group.save()
LOG.error(_LE("Consistency group %s: create failed"),
LOG.error(_LE("Group %s: create failed"),
group.name)
group.status = status
group.created_at = timeutils.utcnow()
group.save()
LOG.info(_LI("Consistency group %s: created successfully"),
LOG.info(_LI("Group %s: created successfully"),
group.name)
self._notify_about_consistencygroup_usage(
context, group, "create.end")
if is_generic_group:
self._notify_about_group_usage(
context, group, "create.end")
else:
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group completed successfully."),
resource={'type': 'consistency_group',
LOG.info(_LI("Create group completed successfully."),
resource={'type': 'group',
'id': group.id})
return group
@ -2846,6 +2897,170 @@ class VolumeManager(manager.SchedulerDependentManager):
resource={'type': 'consistency_group',
'id': group.id})
def delete_group(self, context, group):
    """Deletes group and the volumes in the group.

    :param context: request context; elevated internally so all rows
        belonging to the group can be read regardless of ownership
    :param group: the Group object to delete
    :raises VolumeAttached: if a volume in the group is still attached
    :raises InvalidVolume: if a volume in the group is not local to this
        manager's host
    :raises VolumeDriverException: if the driver reports the delete failed
    """
    context = context.elevated()
    # Quotas are always charged against the project that owns the group,
    # even when an admin from another project issues the delete.
    project_id = group.project_id

    volumes = objects.VolumeList.get_all_by_generic_group(
        context, group.id)

    for vol_obj in volumes:
        if vol_obj.attach_status == "attached":
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=vol_obj.id)
        # self.host is 'host@backend'
        # vol_obj.host is 'host@backend#pool'
        # Extract host before doing comparison
        if vol_obj.host:
            new_host = vol_utils.extract_host(vol_obj.host)
            if new_host != self.host:
                msg = (_("Volume %(vol_id)s is not local to this node "
                         "%(host)s") % {'vol_id': vol_obj.id,
                                        'host': self.host})
                raise exception.InvalidVolume(reason=msg)

    self._notify_about_group_usage(
        context, group, "delete.start")

    volumes_model_update = None
    model_update = None
    try:
        utils.require_driver_initialized(self.driver)

        try:
            model_update, volumes_model_update = (
                self.driver.delete_group(context, group, volumes))
        except NotImplementedError:
            model_update, volumes_model_update = (
                self._delete_group_generic(context, group, volumes))

        if volumes_model_update:
            for update in volumes_model_update:
                # If we failed to delete a volume, make sure the
                # status for the group is set to error as well
                # NOTE(review): assumes the driver returns a model_update
                # whenever it returns volumes_model_update; a None
                # model_update here would raise — confirm driver contract.
                if (update['status'] in ['error_deleting', 'error']
                        and model_update['status'] not in
                        ['error_deleting', 'error']):
                    model_update['status'] = update['status']

            self.db.volumes_update(context, volumes_model_update)

        if model_update:
            if model_update['status'] in ['error_deleting', 'error']:
                msg = (_('Delete group failed.'))
                LOG.error(msg,
                          resource={'type': 'group',
                                    'id': group.id})
                raise exception.VolumeDriverException(message=msg)
            else:
                group.update(model_update)
                group.save()

    except Exception:
        with excutils.save_and_reraise_exception():
            group.status = 'error'
            group.save()
            # Update volume status to 'error' if driver returns
            # None for volumes_model_update.
            if not volumes_model_update:
                for vol_obj in volumes:
                    vol_obj.status = 'error'
                    vol_obj.save()

    # Get reservations for group
    try:
        reserve_opts = {'groups': -1}
        grpreservations = GROUP_QUOTAS.reserve(context,
                                               project_id=project_id,
                                               **reserve_opts)
    except Exception:
        # Best effort: a quota failure must not block the delete itself.
        grpreservations = None
        LOG.exception(_LE("Delete group "
                          "failed to update usages."),
                      resource={'type': 'group',
                                'id': group.id})

    for vol in volumes:
        # Get reservations for volume
        try:
            reserve_opts = {'volumes': -1,
                            'gigabytes': -vol.size}
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        vol.volume_type_id)
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
        except Exception:
            reservations = None
            LOG.exception(_LE("Delete group "
                              "failed to update usages."),
                          resource={'type': 'group',
                                    'id': group.id})

        # Delete glance metadata if it exists
        self.db.volume_glance_metadata_delete_by_volume(context, vol.id)

        vol.destroy()

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)

        self.stats['allocated_capacity_gb'] -= vol.size

    if grpreservations:
        GROUP_QUOTAS.commit(context, grpreservations,
                            project_id=project_id)

    group.destroy()

    self._notify_about_group_usage(
        context, group, "delete.end")
    self.publish_service_capabilities(context)
    LOG.info(_LI("Delete group "
                 "completed successfully."),
             resource={'type': 'group',
                       'id': group.id})
def _create_group_generic(self, context, group):
    """Creates a group.

    The group row already exists in the DB at this point, so the
    generic implementation only reports the resulting state.
    """
    return {'status': fields.GroupStatus.AVAILABLE,
            'created_at': timeutils.utcnow()}
def _delete_group_generic(self, context, group, volumes):
    """Deletes a group and volumes in the group."""
    group_update = {'status': group.status}
    vol_updates = []
    for vol in volumes:
        entry = {'id': vol.id}
        try:
            self.driver.remove_export(context, vol)
            self.driver.delete_volume(vol)
        except exception.VolumeIsBusy:
            # Busy volumes stay usable; leave them available.
            entry['status'] = 'available'
        except Exception:
            # Any other failure marks both volume and group as errored.
            entry['status'] = 'error'
            group_update['status'] = fields.GroupStatus.ERROR
        else:
            entry['status'] = 'deleted'
        vol_updates.append(entry)
    return group_update, vol_updates
def _update_group_generic(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a group."""
# NOTE(xyang): The volume manager adds/removes the volume to/from the
# group in the database. This default implementation does not do
# anything in the backend storage.
return None, None, None
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
@ -2990,6 +3205,151 @@ class VolumeManager(manager.SchedulerDependentManager):
resource={'type': 'consistency_group',
'id': group.id})
def update_group(self, context, group,
                 add_volumes=None, remove_volumes=None):
    """Updates group.

    Update group by adding volumes to the group,
    or removing volumes from the group.

    :param context: request context
    :param group: the Group object to update
    :param add_volumes: comma-separated string of volume ids to add
    :param remove_volumes: comma-separated string of volume ids to remove
    :raises VolumeNotFound: if a referenced volume does not exist
    :raises InvalidVolume: if a volume is in an invalid state or is not
        local to this node
    :raises VolumeDriverException: if the driver reports the update failed
    """
    add_volumes_ref = []
    remove_volumes_ref = []
    add_volumes_list = []
    remove_volumes_list = []
    if add_volumes:
        add_volumes_list = add_volumes.split(',')
    if remove_volumes:
        remove_volumes_list = remove_volumes.split(',')
    for add_vol in add_volumes_list:
        try:
            add_vol_ref = objects.Volume.get_by_id(context, add_vol)
        except exception.VolumeNotFound:
            # NOTE: log the id string; add_vol_ref is unbound when the
            # lookup itself raised.
            LOG.error(_LE("Update group "
                          "failed to add volume-%(volume_id)s: "
                          "VolumeNotFound."),
                      {'volume_id': add_vol},
                      resource={'type': 'group',
                                'id': group.id})
            raise
        if add_vol_ref.status not in VALID_ADD_VOL_TO_GROUP_STATUS:
            msg = (_("Cannot add volume %(volume_id)s to "
                     "group %(group_id)s because volume is in an invalid "
                     "state: %(status)s. Valid states are: %(valid)s.") %
                   {'volume_id': add_vol_ref.id,
                    'group_id': group.id,
                    'status': add_vol_ref.status,
                    'valid': VALID_ADD_VOL_TO_GROUP_STATUS})
            raise exception.InvalidVolume(reason=msg)
        # self.host is 'host@backend'
        # volume_ref['host'] is 'host@backend#pool'
        # Extract host before doing comparison
        new_host = vol_utils.extract_host(add_vol_ref.host)
        if new_host != self.host:
            raise exception.InvalidVolume(
                reason=_("Volume is not local to this node."))
        add_volumes_ref.append(add_vol_ref)

    for remove_vol in remove_volumes_list:
        try:
            remove_vol_ref = objects.Volume.get_by_id(context, remove_vol)
        except exception.VolumeNotFound:
            # NOTE: log the id string; remove_vol_ref is unbound when
            # the lookup itself raised.
            LOG.error(_LE("Update group "
                          "failed to remove volume-%(volume_id)s: "
                          "VolumeNotFound."),
                      {'volume_id': remove_vol},
                      resource={'type': 'group',
                                'id': group.id})
            raise
        if (remove_vol_ref.status not in
                VALID_REMOVE_VOL_FROM_GROUP_STATUS):
            msg = (_("Cannot remove volume %(volume_id)s from "
                     "group %(group_id)s because volume is in an invalid "
                     "state: %(status)s. Valid states are: %(valid)s.") %
                   {'volume_id': remove_vol_ref.id,
                    'group_id': group.id,
                    'status': remove_vol_ref.status,
                    'valid': VALID_REMOVE_VOL_FROM_GROUP_STATUS})
            raise exception.InvalidVolume(reason=msg)
        remove_volumes_ref.append(remove_vol_ref)

    self._notify_about_group_usage(
        context, group, "update.start")

    try:
        utils.require_driver_initialized(self.driver)

        try:
            model_update, add_volumes_update, remove_volumes_update = (
                self.driver.update_group(
                    context, group,
                    add_volumes=add_volumes_ref,
                    remove_volumes=remove_volumes_ref))
        except NotImplementedError:
            model_update, add_volumes_update, remove_volumes_update = (
                self._update_group_generic(
                    context, group,
                    add_volumes=add_volumes_ref,
                    remove_volumes=remove_volumes_ref))

        if add_volumes_update:
            self.db.volumes_update(context, add_volumes_update)

        if remove_volumes_update:
            self.db.volumes_update(context, remove_volumes_update)

        if model_update:
            if model_update['status'] in (
                    [fields.GroupStatus.ERROR]):
                msg = (_('Error occurred when updating group '
                         '%s.') % group.id)
                LOG.error(msg)
                raise exception.VolumeDriverException(message=msg)
            group.update(model_update)
            group.save()

    except exception.VolumeDriverException:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error occurred in the volume driver when "
                          "updating group %(group_id)s."),
                      {'group_id': group.id})
            group.status = 'error'
            group.save()
            for add_vol in add_volumes_ref:
                add_vol.status = 'error'
                add_vol.save()
            for rem_vol in remove_volumes_ref:
                rem_vol.status = 'error'
                rem_vol.save()
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error occurred when updating "
                          "group %(group_id)s."),
                      {'group_id': group.id})
            group.status = 'error'
            group.save()
            for add_vol in add_volumes_ref:
                add_vol.status = 'error'
                add_vol.save()
            for rem_vol in remove_volumes_ref:
                rem_vol.status = 'error'
                rem_vol.save()

    group.status = 'available'
    group.save()
    for add_vol in add_volumes_ref:
        add_vol.group_id = group.id
        add_vol.save()
    for rem_vol in remove_volumes_ref:
        rem_vol.group_id = None
        rem_vol.save()

    self._notify_about_group_usage(
        context, group, "update.end")
    LOG.info(_LI("Update group completed successfully."),
             resource={'type': 'group',
                       'id': group.id})
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates the cgsnapshot."""
caller_context = context

View File

@ -104,9 +104,10 @@ class VolumeAPI(rpc.RPCAPI):
2.3 - Adds support for sending objects over RPC in
initialize_connection().
2.4 - Sends request_spec as object in create_volume().
2.5 - Adds create_group, delete_group, and update_group
"""
RPC_API_VERSION = '2.4'
RPC_API_VERSION = '2.5'
TOPIC = CONF.volume_topic
BINARY = 'cinder-volume'
@ -341,3 +342,21 @@ class VolumeAPI(rpc.RPCAPI):
return cctxt.call(ctxt, 'get_manageable_snapshots', marker=marker,
limit=limit, offset=offset, sort_keys=sort_keys,
sort_dirs=sort_dirs)
def create_group(self, ctxt, group, host):
    """Asynchronously asks the manager on *host* to create the group."""
    self._get_cctxt(host, '2.5').cast(ctxt, 'create_group', group=group)
def delete_group(self, ctxt, group):
    """Asynchronously asks the group's backend to delete the group."""
    self._get_cctxt(group.host, '2.5').cast(ctxt, 'delete_group',
                                            group=group)
def update_group(self, ctxt, group, add_volumes=None,
                 remove_volumes=None):
    """Asynchronously asks the group's backend to update the group."""
    client = self._get_cctxt(group.host, '2.5')
    client.cast(ctxt, 'update_group', group=group,
                add_volumes=add_volumes, remove_volumes=remove_volumes)

View File

@ -243,6 +243,36 @@ def notify_about_consistencygroup_usage(context, group, event_suffix,
usage_info)
def _usage_from_group(group_ref, **kw):
usage_info = dict(tenant_id=group_ref.project_id,
user_id=group_ref.user_id,
availability_zone=group_ref.availability_zone,
group_id=group_ref.id,
name=group_ref.name,
created_at=group_ref.created_at.isoformat(),
status=group_ref.status)
usage_info.update(kw)
return usage_info
def notify_about_group_usage(context, group, event_suffix,
                             extra_usage_info=None, host=None):
    """Emits a 'group.<event_suffix>' notification for the given group."""
    notifier_host = host or CONF.host
    usage_info = _usage_from_group(group, **(extra_usage_info or {}))
    rpc.get_notifier("group", notifier_host).info(
        context, 'group.%s' % event_suffix, usage_info)
def _usage_from_cgsnapshot(cgsnapshot, **kw):
usage_info = dict(
tenant_id=cgsnapshot.project_id,

View File

@ -110,6 +110,12 @@ def get_all_types(context, inactive=0, filters=None, marker=None,
return vol_types
def get_all_types_by_group(context, group_id):
    """Get all volume_types in a group."""
    return db.volume_type_get_all_by_group(context, group_id)
def get_volume_type(ctxt, id, expected_fields=None):
"""Retrieves single volume type by id."""
if id is None:

View File

@ -114,6 +114,12 @@
"group:access_group_types_specs": "rule:admin_api",
"group:group_type_access": "rule:admin_or_owner",
"group:create" : "",
"group:delete": "rule:admin_or_owner",
"group:update": "rule:admin_or_owner",
"group:get": "rule:admin_or_owner",
"group:get_all": "rule:admin_or_owner",
"scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api",
"message:delete": "rule:admin_or_owner",
"message:get": "rule:admin_or_owner",

View File

@ -0,0 +1,4 @@
---
features:
  - Introduced generic volume groups and added
    create/delete/update/list/show APIs for groups.

View File

@ -59,6 +59,11 @@ ignore_messages = [
# during runtime.
"Class 'ConsistencyGroup' has no '__table__' member",
"Class 'Cgsnapshot' has no '__table__' member",
# NOTE(xyang): this error message is for code [E1120] when checking if
# there are already 'groups' entries in 'quota_classes' in the DB migration
# (078_add_groups_and_group_volume_type_mapping_table).
"No value passed for parameter 'functions' in function call",
]
# Note(maoy): We ignore cinder.tests for now due to high false
@ -99,6 +104,8 @@ objects_ignore_messages = [
"Module 'cinder.objects' has no 'VolumeProperties' member",
"Module 'cinder.objects' has no 'VolumeType' member",
"Module 'cinder.objects' has no 'VolumeTypeList' member",
"Module 'cinder.objects' has no 'Group' member",
"Module 'cinder.objects' has no 'GroupList' member",
]
objects_ignore_modules = ["cinder/objects/"]