Scheduler changes for consistency groups

This patch implements the scheduler changes for
scheduling the creation of consistency groups,
shares within consistency groups, and cgsnapshots.

The consistency group scheduling filter was added
to allow the filtering of hosts that are not supported
by a specified consistency group. If no CG is specified
on share create then this filter is a no-op. CGs will be
scheduled only to a backend that supports all of their
specified share types.

Partially implements bp manila-consistency-groups

Change-Id: Ia03191085cefb47a17ce99ad3f30ba70412f5802
This commit is contained in:
Alex Meade 2015-08-12 12:59:39 -04:00
parent 9e9d904412
commit f3a761f06b
18 changed files with 1622 additions and 23 deletions

View File

@ -51,6 +51,16 @@ def share_update_db(context, share_id, host):
return db.share_update(context, share_id, values)
def cg_update_db(context, cg_id, host):
    """Assign a host to a consistency group and bump its updated_at.

    :returns: A CG with the updated fields set properly.
    """
    return db.consistency_group_update(
        context, cg_id,
        {'host': host, 'updated_at': timeutils.utcnow()})
class Scheduler(object):
"""The base class that all Scheduler classes should inherit from."""
@ -89,6 +99,13 @@ class Scheduler(object):
"""Must override schedule method for scheduler to work."""
raise NotImplementedError(_("Must implement schedule_create_share"))
def schedule_create_consistency_group(self, context, group_id,
                                      request_spec,
                                      filter_properties):
    """Must override schedule method for scheduler to work."""
    msg = _("Must implement schedule_create_consistency_group")
    raise NotImplementedError(msg)
def get_pools(self, context, filters):
    """Must override schedule method for scheduler to work."""
    msg = _("Must implement get_pools")
    raise NotImplementedError(msg)

View File

@ -25,7 +25,7 @@ from oslo_log import log
from manila import exception
from manila.i18n import _
from manila.i18n import _LE
from manila.i18n import _LE, _LI
from manila.scheduler import driver
from manila.scheduler import scheduler_options
from manila.share import share_types
@ -145,6 +145,18 @@ class FilterScheduler(driver.Scheduler):
config_options = self._get_configuration_options()
# NOTE(ameade): If a consistency group is specified, pass the
# consistency group support level to the ConsistencyGroupFilter
# (host, pool, or False)
cg_support = None
cg = request_spec.get('consistency_group')
if cg:
temp_hosts = self.host_manager.get_all_host_states_share(elevated)
cg_host = next((host for host in temp_hosts
if host.host == cg.get('host')), None)
if cg_host:
cg_support = cg_host.consistency_group_support
if filter_properties is None:
filter_properties = {}
self._populate_retry_share(filter_properties, resource_properties)
@ -153,7 +165,9 @@ class FilterScheduler(driver.Scheduler):
'request_spec': request_spec,
'config_options': config_options,
'share_type': share_type,
'resource_type': resource_type
'resource_type': resource_type,
'cg_support': cg_support,
'consistency_group': cg,
})
self.populate_filter_properties_share(request_spec, filter_properties)
@ -254,3 +268,119 @@ class FilterScheduler(driver.Scheduler):
)
filter_properties['user_id'] = shr.get('user_id')
filter_properties['metadata'] = shr.get('metadata')
def schedule_create_consistency_group(self, context, group_id,
                                      request_spec,
                                      filter_properties):
    """Pick the best host for a consistency group and kick off creation.

    :param context: RequestContext of the caller.
    :param group_id: ID of the consistency group being scheduled.
    :param request_spec: dict describing the CG, including its share
        types.
    :param filter_properties: extra filtering properties (unused here).
    :raises exception.NoValidHost: when no backend can support every
        share type of the consistency group.
    """
    # Pass format args to LOG lazily (no eager % interpolation) so the
    # message is only built when the log level is enabled.
    LOG.info(_LI("Scheduling consistency group %s"), group_id)

    host = self._get_best_host_for_consistency_group(
        context,
        request_spec)

    if not host:
        msg = _("No hosts available for consistency group %s") % group_id
        raise exception.NoValidHost(reason=msg)

    msg = _LI("Chose host %(host)s for create_consistency_group %(cg_id)s")
    LOG.info(msg, {'host': host, 'cg_id': group_id})

    # Persist the chosen host before casting to the share service.
    updated_group = driver.cg_update_db(context, group_id, host)

    self.share_rpcapi.create_consistency_group(context,
                                               updated_group, host)
def _get_weighted_hosts_for_share_type(self, context, request_spec,
                                       share_type):
    """Return weighed hosts able to hold a CG share of ``share_type``.

    Runs the standard filter/weigher chain over all known share hosts,
    additionally requiring host- or pool-level consistency group
    support.

    :param context: elevated RequestContext.
    :param request_spec: dict describing the consistency group request.
    :param share_type: share type dict; its extra specs are normalized
        in place.
    :returns: list of weighed host objects, best first (may be empty).
    """
    config_options = self._get_configuration_options()
    # NOTE(ameade): Find our local list of acceptable hosts by
    # filtering and weighing our options. We virtually consume
    # resources on it so subsequent selections can adjust accordingly.

    # NOTE(ameade): Remember, we are using an iterator here. So only
    # traverse this list once.
    all_hosts = self.host_manager.get_all_host_states_share(context)
    if not all_hosts:
        return []

    share_type['extra_specs'] = share_type.get('extra_specs', {})

    if share_type['extra_specs']:
        for spec_name in share_types.get_undeletable_extra_specs():
            extra_spec = share_type['extra_specs'].get(spec_name)

            if extra_spec is not None:
                share_type['extra_specs'][spec_name] = (
                    "<is> %s" % extra_spec)

    # Only allow pools that support consistency groups
    share_type['extra_specs']['consistency_group_support'] = (
        "<or> host <or> pool")

    filter_properties = {
        'context': context,
        'request_spec': request_spec,
        'config_options': config_options,
        'share_type': share_type,
        'resource_type': share_type,
        'size': 0,
    }

    # Filter local hosts based on requirements ...
    hosts = self.host_manager.get_filtered_hosts(all_hosts,
                                                 filter_properties)

    if not hosts:
        return []

    # Lazy format args: only interpolated when DEBUG is enabled.
    LOG.debug("Filtered %s", hosts)

    # weighted_host = WeightedHost() ... the best host for the job.
    weighed_hosts = self.host_manager.get_weighed_hosts(
        hosts,
        filter_properties)
    # Normalize any falsy result to an empty list for callers.
    return weighed_hosts or []
def _get_weighted_candidates_cg(self, context, request_spec):
    """Find hosts that support every share type of the consistency group.

    Returns a list of hosts that meet the required specs,
    ordered by their fitness.
    """
    elevated = context.elevated()

    shr_types = request_spec.get("share_types", None)

    weighed_hosts = []

    for iteration_count, share_type in enumerate(shr_types):
        temp_weighed_hosts = self._get_weighted_hosts_for_share_type(
            elevated, request_spec, share_type)

        # NOTE(ameade): Take the intersection of hosts so we have one that
        # can support all share types of the CG
        if iteration_count == 0:
            weighed_hosts = temp_weighed_hosts
        else:
            # Build a set of host names once for O(1) membership tests
            # instead of the quadratic nested-loop intersection.
            temp_host_names = set(
                host.obj.host for host in temp_weighed_hosts)
            weighed_hosts = [host for host in weighed_hosts
                             if host.obj.host in temp_host_names]

        # Once the intersection is empty it can never grow again, so
        # stop early instead of filtering the remaining share types.
        if not weighed_hosts:
            return []

    return weighed_hosts
def _get_best_host_for_consistency_group(self, context, request_spec):
    """Return the top-weighed host name for the CG, or None if none fit."""
    candidates = self._get_weighted_candidates_cg(context, request_spec)
    if candidates:
        return candidates[0].obj.host
    return None

View File

@ -0,0 +1,54 @@
# Copyright (c) 2015 Alex Meade
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from manila.openstack.common.scheduler import filters
from manila.share import utils as share_utils
LOG = log.getLogger(__name__)
class ConsistencyGroupFilter(filters.BaseHostFilter):
    """ConsistencyGroupFilter filters host based on compatibility with CG."""

    def host_passes(self, host_state, filter_properties):
        """Return True if host will work with desired consistency group."""
        cg = filter_properties.get('consistency_group')
        cg_support = filter_properties.get('cg_support')

        # NOTE(ameade): If creating a share not in a CG, then of course the
        # host is valid for the cg.
        if not cg:
            return True

        # NOTE(ameade): If the CG host can only support shares on the same
        # pool, then the only valid pool is that one.
        if cg_support == 'pool' and cg.get('host') == host_state.host:
            return True

        # NOTE(ameade): If the CG host can support shares on the same host,
        # then any pool on that backend will work.
        elif cg_support == 'host':
            cg_backend = share_utils.extract_host(cg['host'])
            host_backend = share_utils.extract_host(host_state.host)
            return cg_backend == host_backend

        # Pass format args lazily so the message is only built when
        # DEBUG logging is enabled (was eager % interpolation).
        LOG.debug("Host %(host)s is not compatible with consistency "
                  "group %(cg)s",
                  {"host": host_state.host, "cg": cg['id']})

        return False

View File

@ -44,7 +44,8 @@ host_manager_opts = [
default=[
'AvailabilityZoneFilter',
'CapacityFilter',
'CapabilitiesFilter'
'CapabilitiesFilter',
'ConsistencyGroupFilter',
],
help='Which filter class names to use for filtering hosts '
'when not specified in the request.'),
@ -123,6 +124,7 @@ class HostState(object):
self.thin_provisioning = False
self.driver_handles_share_servers = False
self.snapshot_support = True
self.consistency_group_support = False
# PoolState for all pools
self.pools = {}
@ -278,6 +280,10 @@ class HostState(object):
if not pool_cap.get('snapshot_support'):
pool_cap['snapshot_support'] = True
if not pool_cap.get('consistency_group_support'):
pool_cap['consistency_group_support'] = \
self.consistency_group_support
def update_backend(self, capability):
self.share_backend_name = capability.get('share_backend_name')
self.vendor_name = capability.get('vendor_name')
@ -286,6 +292,8 @@ class HostState(object):
self.driver_handles_share_servers = capability.get(
'driver_handles_share_servers')
self.snapshot_support = capability.get('snapshot_support')
self.consistency_group_support = capability.get(
'consistency_group_support', False)
self.updated = capability['timestamp']
def consume_from_share(self, share):

View File

@ -24,10 +24,11 @@ from oslo_log import log
from oslo_utils import excutils
from oslo_utils import importutils
from manila.common import constants
from manila import context
from manila import db
from manila import exception
from manila.i18n import _LE
from manila.i18n import _LE, _LW
from manila import manager
from manila import rpc
from manila.share import rpcapi as share_rpcapi
@ -46,7 +47,7 @@ CONF.register_opt(scheduler_driver_opt)
class SchedulerManager(manager.Manager):
"""Chooses a host to create shares."""
RPC_API_VERSION = '1.2'
RPC_API_VERSION = '1.3'
def __init__(self, scheduler_driver=None, service_name=None,
*args, **kwargs):
@ -119,3 +120,32 @@ class SchedulerManager(manager.Manager):
def request_service_capabilities(self, context):
    """Ask all share services to (re)publish their capabilities."""
    api = share_rpcapi.ShareAPI()
    api.publish_service_capabilities(context)
def _set_cg_error_state(self, method, context, ex, request_spec):
    """Log a scheduling failure and flip the CG status to 'error'."""
    LOG.warning(_LW("Failed to schedule_%(method)s: %(ex)s"),
                {"method": method, "ex": ex})

    cg_id = request_spec.get('consistency_group_id')
    if cg_id:
        db.consistency_group_update(
            context, cg_id, {'status': constants.STATUS_ERROR})

    # TODO(ameade): add notifications
def create_consistency_group(self, context, cg_id, request_spec=None,
                             filter_properties=None):
    """Delegate CG scheduling to the driver, flagging the CG on failure.

    NoValidHost is swallowed after marking the CG as errored; any other
    exception also marks the CG but is re-raised to the caller.
    """
    method = 'create_consistency_group'
    try:
        self.driver.schedule_create_consistency_group(
            context, cg_id, request_spec, filter_properties)
    except exception.NoValidHost as ex:
        self._set_cg_error_state(method, context, ex, request_spec)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            self._set_cg_error_state(method, context, ex, request_spec)

View File

@ -34,15 +34,16 @@ class SchedulerAPI(object):
1.1 - Add get_pools method
1.2 - Introduce Share Instances:
Replace create_share() - > create_share_instance()
1.3 - Add create_consistency_group method
'''
RPC_API_VERSION = '1.2'
RPC_API_VERSION = '1.3'
def __init__(self):
    """Set up the RPC client targeting the scheduler topic.

    The version cap is raised to 1.3, the version that introduced
    create_consistency_group.
    """
    # NOTE(review): the scraped diff carried both the pre-patch
    # ('1.2') and post-patch ('1.3') client lines; this is the
    # reconstructed post-patch code.
    super(SchedulerAPI, self).__init__()
    target = messaging.Target(topic=CONF.scheduler_topic,
                              version=self.RPC_API_VERSION)
    self.client = rpc.get_client(target, version_cap='1.3')
def create_share_instance(self, ctxt, request_spec=None,
filter_properties=None):
@ -71,3 +72,15 @@ class SchedulerAPI(object):
cctxt = self.client.prepare(version='1.1')
return cctxt.call(ctxt, 'get_pools',
filters=filters)
def create_consistency_group(self, ctxt, cg_id, request_spec=None,
                             filter_properties=None):
    """Cast an async create_consistency_group call to the scheduler."""
    cctxt = self.client.prepare(version='1.3')
    return cctxt.cast(
        ctxt,
        'create_consistency_group',
        cg_id=cg_id,
        request_spec=jsonutils.to_primitive(request_spec),
        filter_properties=filter_properties,
    )

View File

@ -344,7 +344,8 @@ class ShareDriver(object):
self.network_api.deallocate_network(context, share_server_id)
def choose_share_server_compatible_with_share(self, context, share_servers,
                                              share, snapshot=None,
                                              consistency_group=None):
    """Method that allows driver to choose share server for provided share.

    If compatible share-server is not found, method should return None.

    :param context: current context
    :param share_servers: list with share-server models
    :param share: share model
    :param snapshot: snapshot model
    :param consistency_group: ConsistencyGroup model with shares
    :returns: share-server or None
    """
    # If creating in a consistency group, the share must land on the
    # share server already bound to that group; any other server is
    # incompatible, so return None when it is not in the list.
    if consistency_group:
        cg_server_id = consistency_group.get('share_server_id')
        for share_server in share_servers:
            if share_server['id'] == cg_server_id:
                return share_server
        return None

    return share_servers[0] if share_servers else None
def choose_share_server_compatible_with_cg(self, context, share_servers,
                                           cg_ref, cgsnapshot=None):
    """Method that allows driver to choose share server for provided CG.

    The default implementation simply takes the first available share
    server; drivers may override to apply backend-specific placement.

    :param context: current context
    :param share_servers: list with share-server models
    :param cg_ref: Consistency Group model
    :param cgsnapshot: optional CGSnapshot model the CG is created from
    :returns: share-server or None
    """
    return share_servers[0] if share_servers else None
def setup_server(self, *args, **kwargs):
@ -512,6 +527,234 @@ class ShareDriver(object):
"""
return []
def create_consistency_group(self, context, cg_dict, share_server=None):
    """Create a consistency group on the backend.

    :param context: request context.
    :param cg_dict: consistency group details; keys include 'id',
        'status', 'host', 'name', 'description', 'share_types'
        (ConsistencyGroupShareTypeMapping models),
        'source_cgsnapshot_id', project/user IDs and timestamps.
    :param share_server: share server model, or None.
    :returns: (cg_model_update, share_update_list)
        cg_model_update - a dict containing any values to be updated
        for the CG in the database. This value may be None.
    :raises NotImplementedError: backend drivers must override this
        method to support consistency groups.
    """
    raise NotImplementedError()
def create_consistency_group_from_cgsnapshot(self, context, cg_dict,
                                             cgsnapshot_dict,
                                             share_server=None):
    """Create a consistency group from a cgsnapshot.

    :param context: request context.
    :param cg_dict: consistency group details, including the new
        'shares' (Share models) being created and the group's
        'share_types', plus 'id', 'status', 'host',
        'source_cgsnapshot_id', project/user IDs and timestamps.
    :param cgsnapshot_dict: source cgsnapshot details; its
        'cgsnapshot_members' list holds one dict per member snapshot
        (each with 'share', 'share_proto', 'share_type_id', 'size',
        IDs and timestamps).
    :param share_server: share server model, or None.
    :returns: (cg_model_update, share_update_list)
        cg_model_update - a dict containing any values to be updated
        for the CG in the database. This value may be None.
        share_update_list - a list of dicts, one for every share
        created in the CG. Each dict should contain at minimum the
        'id' key and 'export_locations'. Export locations should be in
        the same format as returned by a share create. This list may
        be empty or None. EXAMPLE::

            [{'id': 'uuid', 'export_locations': ['export_path']}]

    :raises NotImplementedError: backend drivers must override this
        method to support consistency groups.
    """
    raise NotImplementedError()
def delete_consistency_group(self, context, cg_dict, share_server=None):
    """Delete a consistency group from the backend.

    :param context: The request context.
    :param cg_dict: consistency group details, including its 'shares'
        (Share models) and 'share_types', plus 'id', 'status', 'host',
        'source_cgsnapshot_id', project/user IDs and timestamps.
    :param share_server: share server model, or None.
    :returns: cg_model_update - a dict containing any values to be
        updated for the CG in the database. This value may be None.
    :raises NotImplementedError: backend drivers must override this
        method to support consistency groups.
    """
    raise NotImplementedError()
def create_cgsnapshot(self, context, snap_dict, share_server=None):
    """Create a snapshot of a consistency group on the backend.

    :param context: request context.
    :param snap_dict: cgsnapshot details; 'cgsnapshot_members' lists
        one dict per share in the group (each with 'share',
        'share_proto', 'share_type_id', 'size', IDs and timestamps),
        alongside 'id', 'status', 'consistency_group_id', project/user
        IDs and timestamps.
    :param share_server: share server model, or None.
    :returns: (cgsnapshot_update, member_update_list)
        cgsnapshot_update - a dict containing any values to be updated
        for the CGSnapshot in the database. This value may be None.
        member_update_list - a list of dicts, one for every member of
        the cgsnapshot. Each dict should contain values to be updated
        for the CGSnapshotMember in the database. This list may be
        empty or None.
    :raises NotImplementedError: backend drivers must override this
        method to support consistency groups.
    """
    raise NotImplementedError()
def delete_cgsnapshot(self, context, snap_dict, share_server=None):
    """Delete a consistency group snapshot from the backend.

    :param context: request context.
    :param snap_dict: cgsnapshot details; 'cgsnapshot_members' lists
        one dict per member (each with 'share', 'share_id',
        'share_proto', 'share_type_id', 'size', IDs and timestamps),
        alongside 'id', 'status', 'consistency_group_id', project/user
        IDs and timestamps.
    :param share_server: share server model, or None.
    :returns: (cgsnapshot_update, member_update_list)
        cgsnapshot_update - a dict containing any values to be updated
        for the CGSnapshot in the database. This value may be None.
    :raises NotImplementedError: backend drivers must override this
        method to support consistency groups.
    """
    raise NotImplementedError()
def get_periodic_hook_data(self, context, share_instances):
"""Dedicated for update/extend of data for existing share instances.

View File

@ -19,6 +19,7 @@
:share_driver: Used by :class:`ShareManager`.
"""
import copy
import datetime
from oslo_config import cfg
@ -128,7 +129,7 @@ def add_hooks(f):
class ShareManager(manager.SchedulerDependentManager):
"""Manages NAS storages."""
RPC_API_VERSION = '1.4'
RPC_API_VERSION = '1.5'
def __init__(self, share_driver=None, service_name=None, *args, **kwargs):
"""Load the driver from args, or from flags."""
@ -264,7 +265,8 @@ class ShareManager(manager.SchedulerDependentManager):
self.publish_service_capabilities(ctxt)
def _provide_share_server_for_share(self, context, share_network_id,
share_instance, snapshot=None):
share_instance, snapshot=None,
consistency_group=None):
"""Gets or creates share_server and updates share with its id.
Active share_server can be deleted if there are no dependent shares
@ -334,7 +336,8 @@ class ShareManager(manager.SchedulerDependentManager):
context, self.host, share_network_id)
)
@utils.synchronized("share_manager_%s" % share_network_id)
@utils.synchronized("share_manager_%s" % share_network_id,
external=True)
def _provide_share_server_for_share():
try:
available_share_servers = get_available_share_servers()
@ -348,12 +351,13 @@ class ShareManager(manager.SchedulerDependentManager):
compatible_share_server = (
self.driver.choose_share_server_compatible_with_share(
context, available_share_servers, share_instance,
snapshot=snapshot.instance if snapshot else None
snapshot=snapshot.instance if snapshot else None,
consistency_group=consistency_group
)
)
except Exception as e:
with excutils.save_and_reraise_exception():
error(_LE("Cannot choose compatible share-server: %s"),
error(_LE("Cannot choose compatible share server: %s"),
e)
if not compatible_share_server:
@ -393,6 +397,103 @@ class ShareManager(manager.SchedulerDependentManager):
return _provide_share_server_for_share()
def _provide_share_server_for_cg(self, context, share_network_id,
                                 cg_ref, cgsnapshot=None):
    """Gets or creates a share_server and updates the CG with its id.

    Active share_server can be deleted if there are no dependent shares
    on it.
    So we need avoid possibility to delete share_server in time gap
    between reaching active state for share_server and setting up
    share_server_id for the consistency group. It is possible, for
    example, with first share creation, which starts share_server
    creation.
    For this purpose used shared lock between this method and the one
    with deletion of share_server.

    :param context: Current context
    :param share_network_id: Share network where existing share server
                             should be found or created. If
                             share_network_id is None method use
                             share_network_id from provided cgsnapshot.
    :param cg_ref: Consistency Group model
    :param cgsnapshot: Optional -- CGSnapshot model
    :returns: dict, dict -- first value is share_server, that
              has been chosen for consistency group schedule.
              Second value is consistency group updated with
              share_server_id.
    :raises exception.InvalidInput: when neither share_network_id nor
        cgsnapshot is provided.
    """
    if not (share_network_id or cgsnapshot):
        msg = _("'share_network_id' parameter or 'snapshot'"
                " should be provided. ")
        raise exception.InvalidInput(reason=msg)

    def error(msg, *args):
        # Log the failure and flip the CG to 'error' before the caller
        # re-raises.
        LOG.error(msg, *args)

        self.db.consistency_group_update(
            context, cg_ref['id'], {'status': constants.STATUS_ERROR})

    # External (cross-process) lock shared with share-server deletion so
    # a server cannot disappear between selection and use.
    @utils.synchronized("share_manager_%s" % share_network_id,
                        external=True)
    def _provide_share_server_for_cg():
        try:
            available_share_servers = (
                self.db.share_server_get_all_by_host_and_share_net_valid(
                    context, self.host, share_network_id))
        except exception.ShareServerNotFound:
            available_share_servers = None

        compatible_share_server = None

        if available_share_servers:
            try:
                # Let the driver pick a compatible server among the
                # valid ones for this host/share-network.
                compatible_share_server = (
                    self.driver.choose_share_server_compatible_with_cg(
                        context, available_share_servers, cg_ref,
                        cgsnapshot=cgsnapshot
                    )
                )
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    error(_LE("Cannot choose compatible share-server: %s"),
                          e)

        if not compatible_share_server:
            # No reusable server: create a fresh DB record in 'creating'
            # state; the backend setup happens below.
            compatible_share_server = self.db.share_server_create(
                context,
                {
                    'host': self.host,
                    'share_network_id': share_network_id,
                    'status': constants.STATUS_CREATING
                }
            )

        msg = ("Using share_server %(share_server)s for consistency "
               "group %(cg_id)s")
        LOG.debug(msg, {
            'share_server': compatible_share_server['id'],
            'cg_id': cg_ref['id']
        })

        # Bind the CG to the chosen server before setup so the shared
        # lock semantics described in the docstring hold.
        updated_cg = self.db.consistency_group_update(
            context,
            cg_ref['id'],
            {'share_server_id': compatible_share_server['id']},
        )

        if compatible_share_server['status'] == constants.STATUS_CREATING:
            # Create share server on backend with data from db.
            compatible_share_server = self._setup_server(
                context, compatible_share_server)
            LOG.info(_LI("Share server created successfully."))
        else:
            LOG.info(_LI("Used preexisting share server "
                         "'%(share_server_id)s'"),
                     {'share_server_id': compatible_share_server['id']})

        return compatible_share_server, updated_cg

    return _provide_share_server_for_cg()
def _get_share_server(self, context, share_instance):
if share_instance['share_server_id']:
return self.db.share_server_get(
@ -438,12 +539,18 @@ class ShareManager(manager.SchedulerDependentManager):
snapshot_ref = None
parent_share_server_id = None
consistency_group_ref = None
if share_instance.get('consistency_group_id'):
consistency_group_ref = self.db.consistency_group_get(
context, share_instance['consistency_group_id'])
if share_network_id or parent_share_server_id:
try:
share_server, share_instance = (
self._provide_share_server_for_share(
context, share_network_id, share_instance,
snapshot=snapshot_ref
snapshot=snapshot_ref,
consistency_group=consistency_group_ref
)
)
except Exception:
@ -1135,3 +1242,254 @@ class ShareManager(manager.SchedulerDependentManager):
share = self.db.share_update(context, share['id'], share_update)
LOG.info(_LI("Shrink share completed successfully."), resource=share)
def create_consistency_group(self, context, cg_id):
    """Create a consistency group, optionally from a source cgsnapshot.

    Loads the CG and its member share instances, provisions or reuses a
    share server when one is needed, invokes the driver, persists any
    driver-supplied updates, and finally marks the CG and its shares
    'available' (or 'error' on any failure).

    :param context: request context (elevated internally).
    :param cg_id: ID of the consistency group to create.
    :returns: the CG id.
    :raises exception.InvalidInput: if a share network is given but the
        driver does not handle share servers.
    """
    context = context.elevated()
    group_ref = self.db.consistency_group_get(context, cg_id)
    group_ref['host'] = self.host
    shares = self.db.share_instances_get_all_by_consistency_group_id(
        context, cg_id)

    source_cgsnapshot_id = group_ref.get("source_cgsnapshot_id")

    snap_ref = None
    parent_share_server_id = None
    if source_cgsnapshot_id:
        # Creating from a snapshot: attach each member's full share
        # instance data and reuse the parent CG's share server.
        snap_ref = self.db.cgsnapshot_get(context, source_cgsnapshot_id)
        for member in snap_ref['cgsnapshot_members']:
            member['share'] = self.db.share_instance_get(
                context, member['share_instance_id'], with_share_data=True)
            member['share_id'] = member['share_instance_id']
        if 'consistency_group' in snap_ref:
            parent_share_server_id = snap_ref['consistency_group'][
                'share_server_id']

    status = constants.STATUS_AVAILABLE
    model_update = False

    share_network_id = group_ref.get('share_network_id', None)
    share_server = None

    if parent_share_server_id:
        share_server = self.db.share_server_get(context,
                                                parent_share_server_id)
        share_network_id = share_server['share_network_id']

    if share_network_id and not self.driver.driver_handles_share_servers:
        # A share network only makes sense for DHSS=True drivers.
        self.db.consistency_group_update(
            context, cg_id, {'status': constants.STATUS_ERROR})
        msg = _("Driver does not expect share-network to be provided "
                "with current configuration.")
        raise exception.InvalidInput(reason=msg)

    if not share_server and share_network_id:
        try:
            share_server, group_ref = self._provide_share_server_for_cg(
                context, share_network_id, group_ref, cgsnapshot=snap_ref
            )
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to get share server"
                              " for consistency group creation."))
                self.db.consistency_group_update(
                    context, cg_id, {'status': constants.STATUS_ERROR})

    try:
        # TODO(ameade): Add notification for create.start
        LOG.info(_LI("Consistency group %s: creating"), cg_id)

        model_update, share_update_list = None, None

        group_ref['shares'] = shares
        if snap_ref:
            model_update, share_update_list = (
                self.driver.create_consistency_group_from_cgsnapshot(
                    context, group_ref, snap_ref,
                    share_server=share_server))
        else:
            model_update = self.driver.create_consistency_group(
                context, group_ref, share_server=share_server)

        if model_update:
            group_ref = self.db.consistency_group_update(context,
                                                         group_ref['id'],
                                                         model_update)

        if share_update_list:
            # Persist per-share driver updates; export locations live
            # in their own table, so split them out of the values dict.
            for share in share_update_list:
                values = copy.deepcopy(share)
                values.pop('id')
                export_locations = values.pop('export_locations')
                self.db.share_instance_update(context, share['id'], values)
                self.db.share_export_locations_update(context,
                                                      share['id'],
                                                      export_locations)
    except Exception:
        with excutils.save_and_reraise_exception():
            # Flag both the CG and its member shares as errored.
            self.db.consistency_group_update(
                context,
                group_ref['id'],
                {'status': constants.STATUS_ERROR})
            for share in shares:
                self.db.share_instance_update(
                    context, share['id'],
                    {'status': constants.STATUS_ERROR})
            LOG.error(_LE("Consistency group %s: create failed"), cg_id)

    now = timeutils.utcnow()
    for share in shares:
        self.db.share_instance_update(
            context, share['id'], {'status': constants.STATUS_AVAILABLE})
    self.db.consistency_group_update(context,
                                     group_ref['id'],
                                     {'status': status,
                                      'created_at': now})
    LOG.info(_LI("Consistency group %s: created successfully"), cg_id)

    # TODO(ameade): Add notification for create.end

    return group_ref['id']
def delete_consistency_group(self, context, cg_id):
    """Delete a consistency group via the driver and destroy its row.

    On driver failure the CG is flagged 'error' (so the user can see
    and retry the deletion) and the exception is re-raised.

    :param context: request context (elevated internally).
    :param cg_id: ID of the consistency group to delete.
    """
    context = context.elevated()
    group_ref = self.db.consistency_group_get(context, cg_id)
    group_ref['host'] = self.host
    group_ref['shares'] = (
        self.db.share_instances_get_all_by_consistency_group_id(
            context, cg_id))

    model_update = False

    # TODO(ameade): Add notification for delete.start

    try:
        LOG.info(_LI("Consistency group %s: deleting"), cg_id)
        share_server = None
        if group_ref.get('share_server_id'):
            share_server = self.db.share_server_get(
                context, group_ref['share_server_id'])
        model_update = self.driver.delete_consistency_group(
            context, group_ref, share_server=share_server)

        if model_update:
            group_ref = self.db.consistency_group_update(
                context, group_ref['id'], model_update)

    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.consistency_group_update(
                context,
                group_ref['id'],
                {'status': constants.STATUS_ERROR})
            LOG.error(_LE("Consistency group %s: delete failed"),
                      group_ref['id'])

    self.db.consistency_group_destroy(context,
                                      cg_id)
    LOG.info(_LI("Consistency group %s: deleted successfully"),
             cg_id)

    # TODO(ameade): Add notification for delete.end
def create_cgsnapshot(self, context, cgsnapshot_id):
    """Create a cgsnapshot via the driver and persist its updates.

    Loads the snapshot and its members, invokes the driver, stores any
    driver-supplied snapshot/member updates, and marks everything
    'available' (or 'error' on failure, re-raising).

    :param context: request context (elevated internally).
    :param cgsnapshot_id: ID of the cgsnapshot to create.
    :returns: the cgsnapshot id.
    """
    context = context.elevated()
    snap_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
    for member in snap_ref['cgsnapshot_members']:
        # Attach the full share instance data the driver will need.
        member['share'] = self.db.share_instance_get(
            context, member['share_instance_id'], with_share_data=True)
        member['share_id'] = member['share_instance_id']

    status = constants.STATUS_AVAILABLE
    snapshot_update = False

    try:
        LOG.info(_LI("Consistency group snapshot %s: creating"),
                 cgsnapshot_id)
        share_server = None
        if snap_ref['consistency_group'].get('share_server_id'):
            share_server = self.db.share_server_get(
                context, snap_ref['consistency_group']['share_server_id'])
        snapshot_update, member_update_list = (
            self.driver.create_cgsnapshot(context, snap_ref,
                                          share_server=share_server))

        if member_update_list:
            # Fold member updates into the snapshot update dict.
            snapshot_update = snapshot_update or {}
            snapshot_update['cgsnapshot_members'] = []
            for update in (member_update_list or []):
                snapshot_update['cgsnapshot_members'].append(update)

        if snapshot_update:
            snap_ref = self.db.cgsnapshot_update(
                context, snap_ref['id'], snapshot_update)

    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.cgsnapshot_update(
                context,
                snap_ref['id'],
                {'status': constants.STATUS_ERROR})
            LOG.error(_LE("Consistency group snapshot %s: create failed"),
                      cgsnapshot_id)

    now = timeutils.utcnow()
    for member in (snap_ref.get('cgsnapshot_members') or []):
        update = {'status': status, 'created_at': now}
        self.db.cgsnapshot_member_update(context, member['id'],
                                         update)
    self.db.cgsnapshot_update(context,
                              snap_ref['id'],
                              {'status': status,
                               'created_at': now})
    LOG.info(_LI("Consistency group snapshot %s: created successfully"),
             cgsnapshot_id)

    return snap_ref['id']
def delete_cgsnapshot(self, context, cgsnapshot_id):
context = context.elevated()
snap_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
for member in snap_ref['cgsnapshot_members']:
member['share'] = self.db.share_instance_get(
context, member['share_instance_id'], with_share_data=True)
member['share_id'] = member['share_instance_id']
snapshot_update = False
try:
LOG.info(_LI("Consistency group snapshot %s: deleting"),
cgsnapshot_id)
share_server = None
if snap_ref['consistency_group'].get('share_server_id'):
share_server = self.db.share_server_get(
context, snap_ref['consistency_group']['share_server_id'])
snapshot_update, member_update_list = (
self.driver.delete_cgsnapshot(context, snap_ref,
share_server=share_server))
if member_update_list:
snapshot_update = snapshot_update or {}
snapshot_update['cgsnapshot_members'] = []
for update in (member_update_list or []):
snapshot_update['cgsnapshot_members'].append(update)
if snapshot_update:
snap_ref = self.db.cgsnapshot_update(
context, snap_ref['id'], snapshot_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.cgsnapshot_update(
context,
snap_ref['id'],
{'status': constants.STATUS_ERROR})
LOG.error(_LE("Consistency group snapshot %s: delete failed"),
snap_ref['name'])
self.db.cgsnapshot_destroy(context, cgsnapshot_id)
LOG.info(_LI("Consistency group snapshot %s: deleted successfully"),
cgsnapshot_id)

View File

@ -39,6 +39,9 @@ class ShareAPI(object):
create_share() -> create_share_instance()
delete_share() -> delete_share_instance()
Add share_instance argument to allow_access() & deny_access()
1.5 - Add create_consistency_group, delete_consistency_group
create_cgsnapshot, and delete_cgsnapshot methods
"""
BASE_RPC_API_VERSION = '1.0'
@ -47,7 +50,7 @@ class ShareAPI(object):
super(ShareAPI, self).__init__()
target = messaging.Target(topic=CONF.share_topic,
version=self.BASE_RPC_API_VERSION)
self.client = rpc.get_client(target, version_cap='1.4')
self.client = rpc.get_client(target, version_cap='1.5')
def create_share_instance(self, ctxt, share_instance, host,
request_spec, filter_properties,
@ -132,3 +135,35 @@ class ShareAPI(object):
cctxt = self.client.prepare(server=host, version='1.3')
cctxt.cast(ctxt, 'shrink_share', share_id=share['id'],
new_size=new_size)
def create_consistency_group(self, ctxt, cg, host):
new_host = utils.extract_host(host)
cctxt = self.client.prepare(server=new_host, version='1.5')
cctxt.cast(
ctxt,
'create_consistency_group',
cg_id=cg['id'])
def delete_consistency_group(self, ctxt, cg):
new_host = utils.extract_host(cg['host'])
cctxt = self.client.prepare(server=new_host, version='1.5')
cctxt.cast(
ctxt,
'delete_consistency_group',
cg_id=cg['id'])
def create_cgsnapshot(self, ctxt, cgsnapshot, host):
new_host = utils.extract_host(host)
cctxt = self.client.prepare(server=new_host, version='1.5')
cctxt.cast(
ctxt,
'create_cgsnapshot',
cgsnapshot_id=cgsnapshot['id'])
def delete_cgsnapshot(self, ctxt, cgsnapshot, host):
new_host = utils.extract_host(host)
cctxt = self.client.prepare(server=new_host, version='1.5')
cctxt.cast(
ctxt,
'delete_cgsnapshot',
cgsnapshot_id=cgsnapshot['id'])

View File

@ -104,7 +104,8 @@ SHARE_SERVICE_STATES_WITH_POOLS = {
reserved_percentage=0,
provisioned_capacity_gb=100,
max_over_subscription_ratio=20.0,
thin_provisioning=True)]),
thin_provisioning=True,
consistency_group_support='pool')]),
'host4@DDD': dict(share_backend_name='DDD',
timestamp=None, reserved_percentage=0,
driver_handles_share_servers=False,
@ -115,14 +116,16 @@ SHARE_SERVICE_STATES_WITH_POOLS = {
reserved_percentage=0,
provisioned_capacity_gb=800,
max_over_subscription_ratio=2.0,
thin_provisioning=True),
thin_provisioning=True,
consistency_group_support='host'),
dict(pool_name='pool4b',
total_capacity_gb=542,
free_capacity_gb=442,
reserved_percentage=0,
provisioned_capacity_gb=2000,
max_over_subscription_ratio=10.0,
thin_provisioning=True)]),
thin_provisioning=True,
consistency_group_support='host')]),
'host5@EEE': dict(share_backend_name='EEE',
timestamp=None, reserved_percentage=0,
driver_handles_share_servers=False,
@ -192,6 +195,7 @@ class FakeHostManager(host_manager.HostManager):
'provisioned_capacity_gb': 256,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': False,
'consistency_group_support': 'host',
'reserved_percentage': 0,
'timestamp': None},
'host4': {'total_capacity_gb': 2048,
@ -209,7 +213,8 @@ class FakeHostManager(host_manager.HostManager):
'max_over_subscription_ratio': 1.5,
'thin_provisioning': True,
'reserved_percentage': 5,
'timestamp': None},
'timestamp': None,
'consistency_group_support': 'pool'},
'host6': {'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'allocated_capacity_gb': 1548,

View File

@ -23,6 +23,7 @@ from oslo_utils import strutils
from manila.common import constants
from manila import context
from manila import exception
from manila.scheduler import driver
from manila.scheduler import filter_scheduler
from manila.scheduler import host_manager
from manila.tests.scheduler import fakes
@ -132,6 +133,34 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertIsNone(weighed_host)
self.assertTrue(_mock_service_get_all_by_topic.called)
    @mock.patch('manila.db.service_get_all_by_topic')
    def test_schedule_share_with_cg_pool_support(
            self, _mock_service_get_all_by_topic):
        """A share created in a CG is scheduled to the CG's own host/pool."""
        sched = fakes.FakeFilterScheduler()
        sched.host_manager = fakes.FakeHostManager()
        fake_context = context.RequestContext('user', 'project',
                                              is_admin=True)
        fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
        request_spec = {
            'share_type': {
                'name': 'NFS',
                'extra_specs': {'consistency_group_support': 'pool'}
            },
            'share_properties': {'project_id': 1, 'size': 1},
            'share_instance_properties': {'project_id': 1, 'size': 1},
            'consistency_group': {
                'id': 'fake-cg-id',
                'host': 'host5#_pool0',
            }
        }
        weighed_host = sched._schedule_share(fake_context, request_spec, {})
        self.assertIsNotNone(weighed_host)
        self.assertIsNotNone(weighed_host.obj)
        # With pool-level CG support, only the CG's exact pool is eligible.
        self.assertEqual('host5#_pool0', weighed_host.obj.host)
        self.assertTrue(_mock_service_get_all_by_topic.called)
def test_schedule_share_type_is_none(self):
sched = fakes.FakeFilterScheduler()
request_spec = {
@ -234,3 +263,87 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual('host',
filter_properties['retry']['hosts'][0])
self.assertEqual(1024, host_state.total_capacity_gb)
    def test_schedule_create_consistency_group(self):
        """Happy path: best host found, DB updated, then RPC cast issued."""
        sched = fakes.FakeFilterScheduler()
        fake_context = context.RequestContext('user', 'project')
        fake_host = 'fake_host'
        request_spec = {'share_types': [{'id': 'NFS'}]}
        self.mock_object(sched, "_get_best_host_for_consistency_group",
                         mock.Mock(return_value=fake_host))
        fake_updated_group = mock.Mock()
        self.mock_object(driver, "cg_update_db", mock.Mock(
            return_value=fake_updated_group))
        self.mock_object(sched.share_rpcapi, "create_consistency_group")
        sched.schedule_create_consistency_group(fake_context, 'fake_id',
                                                request_spec, {})
        sched._get_best_host_for_consistency_group.assert_called_once_with(
            fake_context, request_spec)
        driver.cg_update_db.assert_called_once_with(fake_context, 'fake_id',
                                                    fake_host)
        # The RPC layer must receive the *updated* group, not the bare id.
        sched.share_rpcapi.create_consistency_group.assert_called_once_with(
            fake_context, fake_updated_group, fake_host)
def test_create_cg_no_hosts(self):
# Ensure empty hosts/child_zones result in NoValidHosts exception.
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project')
request_spec = {'share_types': [{'id': 'NFS'}]}
self.assertRaises(exception.NoValidHost,
sched.schedule_create_consistency_group,
fake_context, 'fake_id', request_spec, {})
    @mock.patch('manila.db.service_get_all_by_topic')
    def test_get_weighted_candidates_for_consistency_group(
            self, _mock_service_get_all_by_topic):
        """Hosts matching the CG's share-type extra specs are returned."""
        sched = fakes.FakeFilterScheduler()
        sched.host_manager = fakes.FakeHostManager()
        fake_context = context.RequestContext('user', 'project')
        fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
        request_spec = {'share_types': [{'name': 'NFS',
                                         'extra_specs': {
                                             SNAPSHOT_SUPPORT: True,
                                         }}]}
        hosts = sched._get_weighted_candidates_cg(fake_context,
                                                  request_spec)
        self.assertTrue(hosts)
    @mock.patch('manila.db.service_get_all_by_topic')
    def test_get_weighted_candidates_for_consistency_group_no_hosts(
            self, _mock_service_get_all_by_topic):
        """No host qualifies when a required extra spec matches nothing."""
        sched = fakes.FakeFilterScheduler()
        sched.host_manager = fakes.FakeHostManager()
        fake_context = context.RequestContext('user', 'project')
        fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
        request_spec = {'share_types': [{'name': 'NFS',
                                         'extra_specs': {
                                             SNAPSHOT_SUPPORT: False
                                         }}]}
        hosts = sched._get_weighted_candidates_cg(fake_context,
                                                  request_spec)
        self.assertEqual([], hosts)
    @mock.patch('manila.db.service_get_all_by_topic')
    def test_get_weighted_candidates_for_consistency_group_many_hosts(
            self, _mock_service_get_all_by_topic):
        """All CG-capable hosts matching the specs are candidates."""
        sched = fakes.FakeFilterScheduler()
        sched.host_manager = fakes.FakeHostManager()
        fake_context = context.RequestContext('user', 'project')
        fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
        request_spec = {'share_types': [{'name': 'NFS',
                                         'extra_specs': {
                                             SNAPSHOT_SUPPORT: True
                                         }}]}
        hosts = sched._get_weighted_candidates_cg(fake_context,
                                                  request_spec)
        # The fake host manager exposes exactly two CG-capable backends.
        self.assertEqual(2, len(hosts))

View File

@ -195,6 +195,7 @@ class HostManagerTestCase(test.TestCase):
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'consistency_group_support': False,
},
}, {
'name': 'host2@back1#BBB',
@ -215,6 +216,7 @@ class HostManagerTestCase(test.TestCase):
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'consistency_group_support': False,
},
}, {
'name': 'host2@back2#CCC',
@ -235,6 +237,7 @@ class HostManagerTestCase(test.TestCase):
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'consistency_group_support': False,
},
},
]
@ -277,6 +280,7 @@ class HostManagerTestCase(test.TestCase):
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'consistency_group_support': False,
},
}, {
'name': 'host2@BBB#pool2',
@ -298,6 +302,7 @@ class HostManagerTestCase(test.TestCase):
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'consistency_group_support': False,
},
}, {
'name': 'host3@CCC#pool3',
@ -319,6 +324,7 @@ class HostManagerTestCase(test.TestCase):
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'consistency_group_support': 'pool',
},
}, {
'name': 'host4@DDD#pool4a',
@ -340,6 +346,7 @@ class HostManagerTestCase(test.TestCase):
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'consistency_group_support': 'host',
},
}, {
'name': 'host4@DDD#pool4b',
@ -361,6 +368,7 @@ class HostManagerTestCase(test.TestCase):
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'consistency_group_support': 'host',
},
},
]
@ -415,6 +423,7 @@ class HostManagerTestCase(test.TestCase):
'provisioned_capacity_gb': 312,
'max_over_subscription_ratio': 1.0,
'thin_provisioning': False,
'consistency_group_support': False,
},
}, {
'name': 'host2@back1#BBB',
@ -435,6 +444,7 @@ class HostManagerTestCase(test.TestCase):
'provisioned_capacity_gb': 400,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': True,
'consistency_group_support': False,
},
},
]
@ -480,7 +490,8 @@ class HostManagerTestCase(test.TestCase):
'max_over_subscription_ratio': 2.0,
'thin_provisioning': True,
'vendor_name': None,
'storage_protocol': None
'storage_protocol': None,
'consistency_group_support': False,
},
},
]

View File

@ -92,3 +92,11 @@ class SchedulerRpcAPITestCase(test.TestCase):
rpc_method='call',
filters=None,
version='1.1')
def test_create_consistency_group(self):
self._test_scheduler_api('create_consistency_group',
rpc_method='cast',
cg_id='cg_id',
request_spec='fake_request_spec',
filter_properties='filter_properties',
version='1.3')

View File

@ -108,6 +108,50 @@ class SchedulerManagerTestCase(test.TestCase):
mock_get_pools.assert_called_once_with(self.context, 'fake_filters')
self.assertEqual('fake_pools', result)
    @mock.patch.object(db, 'consistency_group_update', mock.Mock())
    def test_create_cg_no_valid_host_puts_cg_in_error_state(self):
        """NoValidHost from the driver is swallowed by the manager.

        The consistency group is put into 'error' state and the
        exception is eaten rather than propagated.
        """
        def raise_no_valid_host(*args, **kwargs):
            raise exception.NoValidHost(reason="")

        fake_cg_id = 1
        cg_id = fake_cg_id
        request_spec = {"consistency_group_id": cg_id}
        with mock.patch.object(self.manager.driver,
                               'schedule_create_consistency_group',
                               mock.Mock(side_effect=raise_no_valid_host)):
            self.manager.create_consistency_group(self.context,
                                                  fake_cg_id,
                                                  request_spec=request_spec,
                                                  filter_properties={})
            db.consistency_group_update.assert_called_once_with(
                self.context, fake_cg_id, {'status': 'error'})
            self.manager.driver.schedule_create_consistency_group\
                .assert_called_once_with(self.context, cg_id,
                                         request_spec, {})
    @mock.patch.object(db, 'consistency_group_update', mock.Mock())
    def test_create_cg_exception_puts_cg_in_error_state(self):
        """Unexpected exceptions from create_consistency_group propagate.

        The consistency group is put into 'error' state and the
        exception is re-raised to the caller.
        """
        fake_cg_id = 1
        cg_id = fake_cg_id
        request_spec = {"consistency_group_id": cg_id}
        with mock.patch.object(self.manager.driver,
                               'schedule_create_consistency_group',
                               mock.Mock(side_effect=exception.NotFound)):
            self.assertRaises(exception.NotFound,
                              self.manager.create_consistency_group,
                              self.context, fake_cg_id,
                              request_spec=request_spec,
                              filter_properties={})
class SchedulerTestCase(test.TestCase):
"""Test case for base scheduler driver class."""

View File

@ -1627,6 +1627,24 @@ class GenericShareDriverTestCase(test.TestCase):
self.assertEqual(result, actual_result)
    @ddt.data({'consistency_group': {'share_server_id': 'fake'},
               'result': {'id': 'fake'}},
              {'consistency_group': None, 'result': {'id': 'fake'}},
              {'consistency_group': {'share_server_id': 'test'},
               'result': {'id': 'test'}})
    @ddt.unpack
    def tests_choose_share_server_compatible_with_share_and_cg(
            self, consistency_group, result):
        """CG's share server wins; otherwise the first server is chosen."""
        share_servers = [{'id': 'fake'}, {'id': 'test'}]
        fake_share = "fake"
        actual_result = self._driver.choose_share_server_compatible_with_share(
            self._context, share_servers, fake_share,
            consistency_group=consistency_group
        )
        self.assertEqual(result, actual_result)
@generic.ensure_server
def fake(driver_instance, context, share_server=None):

View File

@ -759,7 +759,8 @@ class ShareManagerTestCase(test.TestCase):
driver_mock.choose_share_server_compatible_with_share
)
driver_method_mock.assert_called_once_with(
self.context, [fake_share_server], share.instance, snapshot=None)
self.context, [fake_share_server], share.instance, snapshot=None,
consistency_group=None)
def test_provide_share_server_for_share_invalid_arguments(self):
self.assertRaises(ValueError,
@ -798,6 +799,35 @@ class ShareManagerTestCase(test.TestCase):
db.share_server_get.assert_called_once_with(
self.context, fake_parent_id)
    def test_provide_share_server_for_cg_incompatible_servers(self):
        """A driver error while choosing a CG-compatible server propagates."""
        fake_exception = exception.ManilaException("fake")
        fake_share_server = {'id': 'fake'}
        cg = db_utils.create_consistency_group()
        self.mock_object(db,
                         'share_server_get_all_by_host_and_share_net_valid',
                         mock.Mock(return_value=[fake_share_server]))
        self.mock_object(
            self.share_manager.driver,
            "choose_share_server_compatible_with_cg",
            mock.Mock(side_effect=fake_exception)
        )
        self.assertRaises(exception.ManilaException,
                          self.share_manager._provide_share_server_for_cg,
                          self.context, "fake_id", cg)
        driver_mock = self.share_manager.driver
        driver_method_mock = (
            driver_mock.choose_share_server_compatible_with_cg
        )
        driver_method_mock.assert_called_once_with(
            self.context, [fake_share_server], cg, cgsnapshot=None)
def test_provide_share_server_for_cg_invalid_arguments(self):
self.assertRaises(exception.InvalidInput,
self.share_manager._provide_share_server_for_cg,
self.context, None, None)
def test_manage_share_invalid_driver(self):
self.mock_object(self.share_manager, 'driver', mock.Mock())
self.share_manager.driver.driver_handles_share_servers = True
@ -1820,6 +1850,447 @@ class ShareManagerTestCase(test.TestCase):
self.assertEqual(old_capabilities,
self.share_manager.last_capabilities)
    def test_create_consistency_group(self):
        """Plain CG create ends with the group marked 'available'."""
        fake_cg = {'id': 'fake_id'}
        self.mock_object(self.share_manager.db, 'consistency_group_get',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.db, 'consistency_group_update',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.driver,
                         'create_consistency_group',
                         mock.Mock(return_value=None))
        self.share_manager.create_consistency_group(self.context, "fake_id")
        self.share_manager.db.consistency_group_update.\
            assert_called_once_with(mock.ANY, 'fake_id',
                                    {'status': constants.STATUS_AVAILABLE,
                                     'created_at': mock.ANY})
    def test_create_cg_with_share_network_driver_not_handles_servers(self):
        """A share network on a no-share-server driver errors the CG."""
        manager.CONF.set_default('driver_handles_share_servers', False)
        self.mock_object(
            self.share_manager.driver.configuration, 'safe_get',
            mock.Mock(return_value=False))
        cg_id = 'fake_cg_id'
        share_network_id = 'fake_sn'
        fake_cg = {'id': 'fake_id', 'share_network_id': share_network_id}
        self.mock_object(
            self.share_manager.db, 'consistency_group_get',
            mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.db, 'consistency_group_update')
        self.assertRaises(
            exception.ManilaException,
            self.share_manager.create_consistency_group, self.context, cg_id)
        self.share_manager.db.consistency_group_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), cg_id)
        self.share_manager.db.consistency_group_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), cg_id,
            {'status': constants.STATUS_ERROR})
    def test_create_cg_with_share_network_driver_handles_servers(self):
        """With DHSS=True a share server is provided and the CG succeeds."""
        manager.CONF.set_default('driver_handles_share_servers', True)
        self.mock_object(
            self.share_manager.driver.configuration, 'safe_get',
            mock.Mock(return_value=True))
        share_network_id = 'fake_sn'
        fake_cg = {'id': 'fake_id', 'share_network_id': share_network_id,
                   'host': "fake_host"}
        self.mock_object(self.share_manager.db, 'consistency_group_get',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.db, 'consistency_group_update',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager, '_provide_share_server_for_cg',
                         mock.Mock(return_value=({}, fake_cg)))
        self.mock_object(self.share_manager.driver,
                         'create_consistency_group',
                         mock.Mock(return_value=None))
        self.share_manager.create_consistency_group(self.context, "fake_id")
        self.share_manager.db.consistency_group_update.\
            assert_called_once_with(mock.ANY, 'fake_id',
                                    {'status': constants.STATUS_AVAILABLE,
                                     'created_at': mock.ANY})
    def test_create_consistency_group_with_update(self):
        """Driver-returned model updates are persisted before 'available'."""
        fake_cg = {'id': 'fake_id'}
        self.mock_object(self.share_manager.db, 'consistency_group_get',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.db, 'consistency_group_update',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.driver,
                         'create_consistency_group',
                         mock.Mock(return_value={'foo': 'bar'}))
        self.share_manager.create_consistency_group(self.context, "fake_id")
        self.share_manager.db.consistency_group_update.\
            assert_any_call(mock.ANY, 'fake_id', {'foo': 'bar'})
        self.share_manager.db.consistency_group_update.\
            assert_any_call(mock.ANY, 'fake_id',
                            {'status': constants.STATUS_AVAILABLE,
                             'created_at': mock.ANY})
    def test_create_consistency_group_with_error(self):
        """A driver failure marks the CG 'error' and re-raises."""
        fake_cg = {'id': 'fake_id'}
        self.mock_object(self.share_manager.db, 'consistency_group_get',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.db, 'consistency_group_update',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.driver,
                         'create_consistency_group',
                         mock.Mock(side_effect=exception.Error))
        self.assertRaises(exception.Error,
                          self.share_manager.create_consistency_group,
                          self.context, "fake_id")
        self.share_manager.db.consistency_group_update.\
            assert_called_once_with(mock.ANY, 'fake_id',
                                    {'status': constants.STATUS_ERROR})
def test_create_consistency_group_from_cgsnapshot(self):
fake_cg = {'id': 'fake_id', 'source_cgsnapshot_id': 'fake_snap_id',
'shares': [], 'share_server_id': 'fake_ss_id'}
fake_ss = {'id': 'fake_ss_id', 'share_network_id': 'fake_sn'}
fake_snap = {'id': 'fake_snap_id', 'cgsnapshot_members': [],
'consistency_group': {'share_server_id': fake_ss['id']}}
self.mock_object(self.share_manager.db, 'consistency_group_get',
mock.Mock(return_value=fake_cg))
self.mock_object(self.share_manager.db, 'cgsnapshot_get',
mock.Mock(return_value=fake_snap))
self.mock_object(self.share_manager.db, 'share_server_get',
mock.Mock(
return_value=fake_ss))
self.mock_object(self.share_manager.db, 'consistency_group_update',
mock.Mock(return_value=fake_cg))
self.mock_object(self.share_manager.driver,
'create_consistency_group_from_cgsnapshot',
mock.Mock(return_value=(None, None)))
self.share_manager.create_consistency_group(self.context, "fake_id")
self.share_manager.db.consistency_group_update.\
assert_called_once_with(mock.ANY, 'fake_id',
{'status': constants.STATUS_AVAILABLE,
'created_at': mock.ANY})
self.share_manager.db.share_server_get(mock.ANY, 'fake_ss_id')
self.share_manager.driver.create_consistency_group_from_cgsnapshot.\
assert_called_once_with(
mock.ANY, fake_cg, fake_snap, share_server=fake_ss)
    def test_create_cg_cgsnapshot_share_network_driver_not_handles_servers(
            self):
        """CG-from-snapshot with a share network errors when DHSS=False."""
        manager.CONF.set_default('driver_handles_share_servers', False)
        self.mock_object(
            self.share_manager.driver.configuration, 'safe_get',
            mock.Mock(return_value=False))
        cg_id = 'fake_cg_id'
        share_network_id = 'fake_sn'
        fake_cg = {'id': 'fake_id', 'source_cgsnapshot_id': 'fake_snap_id',
                   'shares': [], 'share_network_id': share_network_id,
                   'host': "fake_host"}
        self.mock_object(
            self.share_manager.db, 'consistency_group_get',
            mock.Mock(return_value=fake_cg))
        fake_snap = {'id': 'fake_snap_id', 'cgsnapshot_members': []}
        self.mock_object(self.share_manager.db, 'cgsnapshot_get',
                         mock.Mock(return_value=fake_snap))
        self.mock_object(self.share_manager.db, 'consistency_group_update')
        self.assertRaises(exception.ManilaException,
                          self.share_manager.create_consistency_group,
                          self.context, cg_id)
        self.share_manager.db.consistency_group_get.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), cg_id)
        self.share_manager.db.consistency_group_update.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), cg_id,
            {'status': constants.STATUS_ERROR})
    def test_create_cg_from_cgsnapshot_share_network_driver_handles_servers(
            self):
        """CG-from-snapshot with DHSS=True provisions a server and succeeds."""
        manager.CONF.set_default('driver_handles_share_servers', True)
        self.mock_object(self.share_manager.driver.configuration, 'safe_get',
                         mock.Mock(return_value=True))
        share_network_id = 'fake_sn'
        fake_cg = {'id': 'fake_id', 'source_cgsnapshot_id': 'fake_snap_id',
                   'shares': [], 'share_network_id': share_network_id}
        fake_snap = {'id': 'fake_snap_id', 'cgsnapshot_members': []}
        self.mock_object(self.share_manager.db, 'consistency_group_get',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.db, 'cgsnapshot_get',
                         mock.Mock(return_value=fake_snap))
        self.mock_object(self.share_manager.db, 'consistency_group_update',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager, '_provide_share_server_for_cg',
                         mock.Mock(return_value=({}, fake_cg)))
        self.mock_object(self.share_manager.driver,
                         'create_consistency_group_from_cgsnapshot',
                         mock.Mock(return_value=(None, None)))
        self.share_manager.create_consistency_group(self.context, "fake_id")
        self.share_manager.db.consistency_group_update.\
            assert_called_once_with(mock.ANY, 'fake_id',
                                    {'status': constants.STATUS_AVAILABLE,
                                     'created_at': mock.ANY})
    def test_create_consistency_group_from_cgsnapshot_with_update(self):
        """Driver CG updates from a snapshot-create are persisted."""
        fake_cg = {'id': 'fake_id', 'source_cgsnapshot_id': 'fake_snap_id',
                   'shares': []}
        fake_snap = {'id': 'fake_snap_id', 'cgsnapshot_members': []}
        self.mock_object(self.share_manager.db, 'consistency_group_get',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.db, 'cgsnapshot_get',
                         mock.Mock(return_value=fake_snap))
        self.mock_object(self.share_manager.db, 'consistency_group_update',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.driver,
                         'create_consistency_group_from_cgsnapshot',
                         mock.Mock(return_value=({'foo': 'bar'}, None)))
        self.share_manager.create_consistency_group(self.context, "fake_id")
        self.share_manager.db.consistency_group_update.\
            assert_any_call(mock.ANY, 'fake_id', {'foo': 'bar'})
        self.share_manager.db.consistency_group_update.\
            assert_any_call(mock.ANY, 'fake_id',
                            {'status': constants.STATUS_AVAILABLE,
                             'created_at': mock.ANY})
    def test_create_consistency_group_from_cgsnapshot_with_share_update(self):
        """Per-share updates and export locations are written through."""
        fake_share = {'id': 'fake_share_id'}
        fake_export_locations = ['my_export_location']
        fake_cg = {'id': 'fake_id', 'source_cgsnapshot_id': 'fake_snap_id',
                   'shares': [fake_share]}
        fake_snap = {'id': 'fake_snap_id', 'cgsnapshot_members': []}
        self.mock_object(self.share_manager.db, 'consistency_group_get',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.db, 'cgsnapshot_get',
                         mock.Mock(return_value=fake_snap))
        self.mock_object(self.share_manager.db, 'consistency_group_update')
        self.mock_object(self.share_manager.db, 'share_instance_update')
        self.mock_object(self.share_manager.db,
                         'share_export_locations_update')
        fake_share_update_list = [{'id': fake_share['id'],
                                   'foo': 'bar',
                                   'export_locations': fake_export_locations}]
        self.mock_object(self.share_manager.driver,
                         'create_consistency_group_from_cgsnapshot',
                         mock.Mock(
                             return_value=(None, fake_share_update_list)))
        self.share_manager.create_consistency_group(self.context, "fake_id")
        self.share_manager.db.share_instance_update.\
            assert_any_call(mock.ANY, 'fake_share_id', {'foo': 'bar'})
        self.share_manager.db.share_export_locations_update.\
            assert_any_call(mock.ANY, 'fake_share_id', fake_export_locations)
        self.share_manager.db.consistency_group_update.\
            assert_any_call(mock.ANY, 'fake_id',
                            {'status': constants.STATUS_AVAILABLE,
                             'created_at': mock.ANY})
    def test_create_consistency_group_from_cgsnapshot_with_error(self):
        """Driver failure during CG-from-snapshot errors the CG, re-raises."""
        fake_cg = {'id': 'fake_id', 'source_cgsnapshot_id': 'fake_snap_id',
                   'shares': []}
        fake_snap = {'id': 'fake_snap_id', 'cgsnapshot_members': []}
        self.mock_object(self.share_manager.db, 'consistency_group_get',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.db, 'cgsnapshot_get',
                         mock.Mock(return_value=fake_snap))
        self.mock_object(self.share_manager.db,
                         'share_instances_get_all_by_consistency_group_id',
                         mock.Mock(return_value=[]))
        self.mock_object(self.share_manager.db, 'consistency_group_update',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.driver,
                         'create_consistency_group_from_cgsnapshot',
                         mock.Mock(side_effect=exception.Error))
        self.assertRaises(exception.Error,
                          self.share_manager.create_consistency_group,
                          self.context, "fake_id")
        self.share_manager.db.consistency_group_update.\
            assert_called_once_with(mock.ANY, 'fake_id',
                                    {'status': constants.STATUS_ERROR})
    def test_create_consistency_group_from_cgsnapshot_with_share_error(self):
        """On failure, member shares are errored along with the CG."""
        fake_share = {'id': 'fake_share_id'}
        fake_cg = {'id': 'fake_id', 'source_cgsnapshot_id': 'fake_snap_id',
                   'shares': [fake_share]}
        fake_snap = {'id': 'fake_snap_id', 'cgsnapshot_members': []}
        self.mock_object(self.share_manager.db, 'consistency_group_get',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.db, 'cgsnapshot_get',
                         mock.Mock(return_value=fake_snap))
        self.mock_object(self.share_manager.db,
                         'share_instances_get_all_by_consistency_group_id',
                         mock.Mock(return_value=[fake_share]))
        self.mock_object(self.share_manager.db, 'consistency_group_update')
        self.mock_object(self.share_manager.db, 'share_instance_update')
        self.mock_object(self.share_manager.driver,
                         'create_consistency_group_from_cgsnapshot',
                         mock.Mock(side_effect=exception.Error))
        self.assertRaises(exception.Error,
                          self.share_manager.create_consistency_group,
                          self.context, "fake_id")
        self.share_manager.db.share_instance_update.\
            assert_any_call(mock.ANY, 'fake_share_id',
                            {'status': constants.STATUS_ERROR})
        self.share_manager.db.consistency_group_update.\
            assert_called_once_with(mock.ANY, 'fake_id',
                                    {'status': constants.STATUS_ERROR})
    def test_delete_consistency_group(self):
        """Successful delete destroys the CG DB record."""
        fake_cg = {'id': 'fake_id'}
        self.mock_object(self.share_manager.db, 'consistency_group_get',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.db, 'consistency_group_update',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.db, 'consistency_group_destroy',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.driver,
                         'delete_consistency_group',
                         mock.Mock(return_value=None))
        self.share_manager.delete_consistency_group(self.context, "fake_id")
        self.share_manager.db.consistency_group_destroy.\
            assert_called_once_with(mock.ANY, 'fake_id')
    def test_delete_consistency_group_with_update(self):
        """Driver updates during delete are persisted before destroy."""
        fake_cg = {'id': 'fake_id'}
        self.mock_object(self.share_manager.db, 'consistency_group_get',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.db, 'consistency_group_update',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.db, 'consistency_group_destroy',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.driver,
                         'delete_consistency_group',
                         mock.Mock(return_value={'foo': 'bar'}))
        self.share_manager.delete_consistency_group(self.context, "fake_id")
        self.share_manager.db.consistency_group_update.\
            assert_called_once_with(mock.ANY, 'fake_id', {'foo': 'bar'})
        self.share_manager.db.consistency_group_destroy.\
            assert_called_once_with(mock.ANY, 'fake_id')
    def test_delete_consistency_group_with_error(self):
        """Driver failure during delete errors the CG and re-raises."""
        fake_cg = {'id': 'fake_id'}
        self.mock_object(self.share_manager.db, 'consistency_group_get',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.db, 'consistency_group_update',
                         mock.Mock(return_value=fake_cg))
        self.mock_object(self.share_manager.driver,
                         'delete_consistency_group',
                         mock.Mock(side_effect=exception.Error))
        self.assertRaises(exception.Error,
                          self.share_manager.delete_consistency_group,
                          self.context, "fake_id")
        self.share_manager.db.consistency_group_update.\
            assert_called_once_with(mock.ANY, 'fake_id',
                                    {'status': constants.STATUS_ERROR})
    def test_create_cgsnapshot(self):
        """Plain cgsnapshot create ends with the snapshot 'available'."""
        fake_snap = {'id': 'fake_snap_id', 'consistency_group': {},
                     'cgsnapshot_members': []}
        self.mock_object(self.share_manager.db, 'cgsnapshot_get',
                         mock.Mock(return_value=fake_snap))
        self.mock_object(self.share_manager.db, 'cgsnapshot_update',
                         mock.Mock(return_value=fake_snap))
        self.mock_object(self.share_manager.driver,
                         'create_cgsnapshot',
                         mock.Mock(return_value=(None, None)))
        self.share_manager.create_cgsnapshot(self.context, fake_snap['id'])
        self.share_manager.db.cgsnapshot_update.\
            assert_called_once_with(mock.ANY, fake_snap['id'],
                                    {'status': constants.STATUS_AVAILABLE,
                                     'created_at': mock.ANY})
    def test_create_cgsnapshot_with_update(self):
        """Driver snapshot updates are persisted before 'available'."""
        fake_snap = {'id': 'fake_snap_id', 'consistency_group': {},
                     'cgsnapshot_members': []}
        self.mock_object(self.share_manager.db, 'cgsnapshot_get',
                         mock.Mock(return_value=fake_snap))
        self.mock_object(self.share_manager.db, 'cgsnapshot_update',
                         mock.Mock(return_value=fake_snap))
        self.mock_object(self.share_manager.driver,
                         'create_cgsnapshot',
                         mock.Mock(return_value=({'foo': 'bar'}, None)))
        self.share_manager.create_cgsnapshot(self.context, fake_snap['id'])
        self.share_manager.db.cgsnapshot_update.\
            assert_any_call(mock.ANY, 'fake_snap_id', {'foo': 'bar'})
        self.share_manager.db.cgsnapshot_update.assert_any_call(
            mock.ANY, fake_snap['id'],
            {'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY})
def test_create_cgsnapshot_with_member_update(self):
        """Driver-supplied member updates must be written back both on the
        snapshot record and via cgsnapshot_member_update.
        """
        member = {'id': 'fake_member_id', 'share_instance_id': 'blah'}
        member_update = {'id': 'fake_member_id', 'foo': 'bar'}
        snap = {'id': 'fake_snap_id', 'consistency_group': {},
                'cgsnapshot_members': [member]}
        db = self.share_manager.db
        self.mock_object(db, 'cgsnapshot_get', mock.Mock(return_value=snap))
        self.mock_object(db, 'cgsnapshot_update', mock.Mock(return_value=snap))
        self.mock_object(db, 'cgsnapshot_member_update')
        self.mock_object(db, 'share_instance_get',
                         mock.Mock(return_value={'id': 'blah'}))
        # Driver returns no snapshot fields, but one updated member.
        self.mock_object(self.share_manager.driver, 'create_cgsnapshot',
                         mock.Mock(return_value=(None, [member_update])))

        self.share_manager.create_cgsnapshot(self.context, snap['id'])

        db.cgsnapshot_update.assert_any_call(
            mock.ANY, snap['id'], {'cgsnapshot_members': [member_update]})
        db.cgsnapshot_update.assert_any_call(
            mock.ANY, snap['id'],
            {'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY})
        self.assertTrue(db.cgsnapshot_member_update.called)
def test_create_cgsnapshot_with_error(self):
        """A driver failure during cgsnapshot create must set error status."""
        snap = {'id': 'fake_snap_id', 'consistency_group': {},
                'cgsnapshot_members': []}
        db = self.share_manager.db
        self.mock_object(db, 'cgsnapshot_get', mock.Mock(return_value=snap))
        self.mock_object(db, 'cgsnapshot_update', mock.Mock(return_value=snap))
        # Simulate the driver raising mid-operation.
        self.mock_object(self.share_manager.driver, 'create_cgsnapshot',
                         mock.Mock(side_effect=exception.Error))

        self.assertRaises(exception.Error,
                          self.share_manager.create_cgsnapshot,
                          self.context, snap['id'])
        db.cgsnapshot_update.assert_called_once_with(
            mock.ANY, snap['id'], {'status': constants.STATUS_ERROR})
@ddt.ddt
class HookWrapperTestCase(test.TestCase):

View File

@ -1,4 +1,4 @@
# Copyright 2013 NetApp
# Copyright 2015 Alex Meade
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -36,7 +36,6 @@ class ShareRpcAPITestCase(test.TestCase):
def setUp(self):
super(ShareRpcAPITestCase, self).setUp()
self.context = context.get_admin_context()
share = db_utils.create_share(
availability_zone=CONF.storage_availability_zone,
status=constants.STATUS_AVAILABLE
@ -44,10 +43,14 @@ class ShareRpcAPITestCase(test.TestCase):
access = db_utils.create_access(share_id=share['id'])
snapshot = db_utils.create_snapshot(share_id=share['id'])
share_server = db_utils.create_share_server()
cg = {'id': 'fake_cg_id', 'host': 'fake_host'}
cgsnapshot = {'id': 'fake_cg_id'}
self.fake_share = jsonutils.to_primitive(share)
self.fake_access = jsonutils.to_primitive(access)
self.fake_snapshot = jsonutils.to_primitive(snapshot)
self.fake_share_server = jsonutils.to_primitive(share_server)
self.fake_cg = jsonutils.to_primitive(cg)
self.fake_cgsnapshot = jsonutils.to_primitive(cgsnapshot)
self.ctxt = context.RequestContext('fake_user', 'fake_project')
self.rpcapi = share_rpcapi.ShareAPI()
@ -68,6 +71,14 @@ class ShareRpcAPITestCase(test.TestCase):
if 'share_instance' in expected_msg:
share_instance = expected_msg.pop('share_instance', None)
expected_msg['share_instance_id'] = share_instance['id']
if 'cg' in expected_msg:
cg = expected_msg['cg']
del expected_msg['cg']
expected_msg['cg_id'] = cg['id']
if 'cgsnapshot' in expected_msg:
snap = expected_msg['cgsnapshot']
del expected_msg['cgsnapshot']
expected_msg['cgsnapshot_id'] = snap['id']
if 'access' in expected_msg:
access = expected_msg['access']
del expected_msg['access']
@ -83,6 +94,8 @@ class ShareRpcAPITestCase(test.TestCase):
host = kwargs['host']
elif 'share_server' in kwargs:
host = kwargs['share_server']['host']
elif 'cg' in kwargs:
host = kwargs['cg']['host']
elif 'share_instance' in kwargs:
host = kwargs['share_instance']['host']
else:
@ -178,3 +191,30 @@ class ShareRpcAPITestCase(test.TestCase):
version='1.3',
share=self.fake_share,
new_size=123)
def test_create_consistency_group(self):
        # create_consistency_group is a fire-and-forget cast at RPC v1.5.
        call_kwargs = {
            'version': '1.5',
            'rpc_method': 'cast',
            'cg': self.fake_cg,
            'host': 'fake_host1',
        }
        self._test_share_api('create_consistency_group', **call_kwargs)
def test_delete_consistency_group(self):
        # No explicit host: the helper derives it from the CG's 'host' field.
        call_kwargs = {
            'version': '1.5',
            'rpc_method': 'cast',
            'cg': self.fake_cg,
        }
        self._test_share_api('delete_consistency_group', **call_kwargs)
def test_create_cgsnapshot(self):
        # cgsnapshot carries no host, so the target host is passed explicitly.
        call_kwargs = {
            'version': '1.5',
            'rpc_method': 'cast',
            'cgsnapshot': self.fake_cgsnapshot,
            'host': 'fake_host1',
        }
        self._test_share_api('create_cgsnapshot', **call_kwargs)
def test_delete_cgsnapshot(self):
        # Mirror of the create case: explicit host, v1.5 cast.
        call_kwargs = {
            'version': '1.5',
            'rpc_method': 'cast',
            'cgsnapshot': self.fake_cgsnapshot,
            'host': 'fake_host1',
        }
        self._test_share_api('delete_cgsnapshot', **call_kwargs)

View File

@ -36,6 +36,7 @@ manila.scheduler.filters =
AvailabilityZoneFilter = manila.openstack.common.scheduler.filters.availability_zone_filter:AvailabilityZoneFilter
CapabilitiesFilter = manila.openstack.common.scheduler.filters.capabilities_filter:CapabilitiesFilter
CapacityFilter = manila.scheduler.filters.capacity_filter:CapacityFilter
ConsistencyGroupFilter = manila.scheduler.filters.consistency_group_filter:ConsistencyGroupFilter
JsonFilter = manila.openstack.common.scheduler.filters.json_filter:JsonFilter
RetryFilter = manila.scheduler.filters.retry_filter:RetryFilter
manila.scheduler.weights =