Share Migration Ocata Improvements

Implemented several improvements to share migration
according to spec [1].

Summary of changes:
- Snapshot restriction in the API has been changed to return an error
only when the 'force_host_assisted_migration' parameter is True
- Added 'preserve_snapshots' to the API and to the
migration_check_compatibility driver interface
- Changed all driver-assisted API parameters to be mandatory
- Added validation to prevent 'force_host_assisted_migration' from
being used alongside driver-assisted parameters
- Changed "same host" validation to reject only if the combination
of "host", "new_share_network" and "new_share_type" is the same as
the source
- Updated migration driver interfaces to support snapshots
- Updated zfsonlinux driver, defaulting preserve_snapshots to False
- Updated dummy driver to support preserve_snapshots

A spec update with the latest changes since [1] merged can be found
in [2].
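
For illustration only (values below are placeholders, not part of this
change), a request body accepted by the 2.29 'migration_start' action
looks as follows; 'host' and the four driver-assisted flags are
mandatory, the remaining keys are optional:

    # Illustrative 2.29 migration_start request body (placeholder values).
    body = {
        'migration_start': {
            'host': 'dest@backend#pool',
            'preserve_metadata': True,
            'writable': True,
            'nondisruptive': False,
            'preserve_snapshots': True,
            # Optional:
            'force_host_assisted_migration': False,
            'new_share_network_id': '<share-network-uuid>',
            'new_share_type_id': '<share-type-uuid>',
        }
    }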

APIImpact
DocImpact

[1] I5717e902373d79ed0d55372afdedfaa98134c24e
[2] If02180ec3b5ae05c9ff18c9f5a054c33f13edcdf

Change-Id: I764b389816319ed0ac5178cadbf809cb632035b4
Partially-implements: blueprint ocata-migration-improvements
Rodrigo Barbieri 2016-11-24 15:52:03 -02:00
parent 7a340efc69
commit 53539c0e1d
30 changed files with 1240 additions and 346 deletions


@ -142,6 +142,7 @@ if [[ "$TEST_TYPE" == "scenario" ]]; then
echo "Set test set to scenario only"
MANILA_TESTS='manila_tempest_tests.tests.scenario'
iniset $TEMPEST_CONFIG auth use_dynamic_credentials True
RUN_MANILA_HOST_ASSISTED_MIGRATION_TESTS=True
elif [[ "$DRIVER" == "generic" ]]; then
RUN_MANILA_HOST_ASSISTED_MIGRATION_TESTS=True
RUN_MANILA_MANAGE_SNAPSHOT_TESTS=True


@ -90,13 +90,18 @@ REST_API_VERSION_HISTORY = """
shares (share_instances) with 'syncing'. Share action API
'access_allow' now accepts rules even when a share or any of
its instances may have an access_rules_status set to 'error'.
* 2.29 - Updated migration_start API adding mandatory parameter
'preserve_snapshots' and changed 'preserve_metadata', 'writable',
'nondisruptive' to be mandatory as well. The migration_start
API prior to this microversion is now unsupported.
"""
# The minimum and maximum versions of the API supported
# The default api version request is defined to be the
# minimum version of the API supported.
_MIN_API_VERSION = "2.0"
_MAX_API_VERSION = "2.28"
_MAX_API_VERSION = "2.29"
DEFAULT_API_VERSION = _MIN_API_VERSION


@ -133,7 +133,7 @@ user documentation.
2.22
----
Updated migration_start API with 'preserve-metadata', 'writable',
Updated migration_start API with 'preserve_metadata', 'writable',
'nondisruptive' and 'new_share_network_id' parameters, renamed
'force_host_copy' to 'force_host_assisted_migration', removed 'notify'
parameter and removed previous migrate_share API support. Updated
@ -172,3 +172,10 @@ user documentation.
values for the 'access_rules_status' field of shares, they have
been collapsed into the transitional state 'syncing'. Access rule changes
can be made independent of a share's 'access_rules_status'.
2.29
----
Updated migration_start API adding mandatory parameter 'preserve_snapshots'
and changed 'preserve_metadata', 'writable', 'nondisruptive' to be mandatory
as well. The migration_start API prior to this microversion is now
unsupported.
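
As a hedged illustration of how a client selects this microversion
(endpoint, token, IDs and body values are placeholders; the header names
are the ones used by manila's microversioned and experimental APIs),
the action can be invoked roughly like this:

    # Client-side sketch, not part of this change; placeholder values.
    import requests

    headers = {
        'X-Auth-Token': '<keystone-token>',
        'X-OpenStack-Manila-API-Version': '2.29',
        'X-OpenStack-Manila-API-Experimental': 'True',
        'Content-Type': 'application/json',
    }
    url = '<manila-endpoint>/v2/<project-id>/shares/<share-id>/action'
    body = {'migration_start': {'host': 'dest@backend#pool',
                                'preserve_metadata': True,
                                'writable': True,
                                'nondisruptive': False,
                                'preserve_snapshots': True}}
    resp = requests.post(url, json=body, headers=headers)
    # 202 means the migration was scheduled; 200 means source and
    # destination were identical and nothing was done.
    print(resp.status_code)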


@ -199,7 +199,7 @@ class ShareController(shares.ShareMixin,
def share_force_delete(self, req, id, body):
return self._force_delete(req, id, body)
@wsgi.Controller.api_version('2.22', experimental=True)
@wsgi.Controller.api_version('2.29', experimental=True)
@wsgi.action("migration_start")
@wsgi.Controller.authorize
def migration_start(self, req, id, body):
@ -215,22 +215,18 @@ class ShareController(shares.ShareMixin,
if not params:
raise exc.HTTPBadRequest(explanation=_("Request is missing body."))
try:
host = params['host']
except KeyError:
raise exc.HTTPBadRequest(explanation=_("Must specify 'host'."))
driver_assisted_params = ['preserve_metadata', 'writable',
'nondisruptive', 'preserve_snapshots']
bool_params = (driver_assisted_params +
['force_host_assisted_migration'])
mandatory_params = driver_assisted_params + ['host']
force_host_assisted_migration = utils.get_bool_from_api_params(
'force_host_assisted_migration', params)
utils.check_params_exist(mandatory_params, params)
bool_param_values = utils.check_params_are_boolean(bool_params, params)
new_share_network = None
new_share_type = None
preserve_metadata = utils.get_bool_from_api_params('preserve_metadata',
params, True)
writable = utils.get_bool_from_api_params('writable', params, True)
nondisruptive = utils.get_bool_from_api_params('nondisruptive', params)
new_share_network_id = params.get('new_share_network_id', None)
if new_share_network_id:
try:
@ -251,15 +247,19 @@ class ShareController(shares.ShareMixin,
raise exc.HTTPBadRequest(explanation=msg)
try:
self.share_api.migration_start(
context, share, host, force_host_assisted_migration,
preserve_metadata, writable, nondisruptive,
return_code = self.share_api.migration_start(
context, share, params['host'],
bool_param_values['force_host_assisted_migration'],
bool_param_values['preserve_metadata'],
bool_param_values['writable'],
bool_param_values['nondisruptive'],
bool_param_values['preserve_snapshots'],
new_share_network=new_share_network,
new_share_type=new_share_type)
except exception.Conflict as e:
raise exc.HTTPConflict(explanation=six.text_type(e))
return webob.Response(status_int=202)
return webob.Response(status_int=return_code)
@wsgi.Controller.api_version('2.22', experimental=True)
@wsgi.action("migration_complete")


@ -638,7 +638,13 @@ class ShareSnapshot(BASE, ManilaBase):
replica_snapshots = list(filter(
lambda x: qualified_replica(x.share_instance), self.instances))
snapshot_instances = replica_snapshots or self.instances
migrating_snapshots = list(filter(
lambda x: x.share_instance['status'] ==
constants.STATUS_MIGRATING, self.instances))
snapshot_instances = (replica_snapshots or migrating_snapshots
or self.instances)
result = snapshot_instances[0]
return result
@ -665,7 +671,8 @@ class ShareSnapshot(BASE, ManilaBase):
return self.status
order = (constants.STATUS_DELETING, constants.STATUS_CREATING,
constants.STATUS_ERROR, constants.STATUS_AVAILABLE)
constants.STATUS_ERROR, constants.STATUS_MIGRATING,
constants.STATUS_AVAILABLE)
other_statuses = [x['status'] for x in self.instances if
x['status'] not in order]
order = (order + tuple(other_statuses))


@ -58,7 +58,7 @@ MAPPING = {
class SchedulerManager(manager.Manager):
"""Chooses a host to create shares."""
RPC_API_VERSION = '1.6'
RPC_API_VERSION = '1.7'
def __init__(self, scheduler_driver=None, service_name=None,
*args, **kwargs):
@ -147,8 +147,9 @@ class SchedulerManager(manager.Manager):
def migrate_share_to_host(
self, context, share_id, host, force_host_assisted_migration,
preserve_metadata, writable, nondisruptive, new_share_network_id,
new_share_type_id, request_spec, filter_properties=None):
preserve_metadata, writable, nondisruptive, preserve_snapshots,
new_share_network_id, new_share_type_id, request_spec,
filter_properties=None):
"""Ensure that the host exists and can accept the share."""
share_ref = db.share_get(context, share_id)
@ -179,7 +180,8 @@ class SchedulerManager(manager.Manager):
share_rpcapi.ShareAPI().migration_start(
context, share_ref, tgt_host.host,
force_host_assisted_migration, preserve_metadata, writable,
nondisruptive, new_share_network_id, new_share_type_id)
nondisruptive, preserve_snapshots, new_share_network_id,
new_share_type_id)
except Exception as ex:
with excutils.save_and_reraise_exception():
_migrate_share_set_error(self, context, ex, request_spec)


@ -38,15 +38,16 @@ class SchedulerAPI(object):
1.4 - Add migrate_share_to_host method
1.5 - Add create_share_replica
1.6 - Add manage_share
1.7 - Updated migrate_share_to_host method with new parameters
"""
RPC_API_VERSION = '1.6'
RPC_API_VERSION = '1.7'
def __init__(self):
super(SchedulerAPI, self).__init__()
target = messaging.Target(topic=CONF.scheduler_topic,
version=self.RPC_API_VERSION)
self.client = rpc.get_client(target, version_cap='1.6')
self.client = rpc.get_client(target, version_cap='1.7')
def create_share_instance(self, context, request_spec=None,
filter_properties=None):
@ -83,10 +84,11 @@ class SchedulerAPI(object):
def migrate_share_to_host(
self, context, share_id, host, force_host_assisted_migration,
preserve_metadata, writable, nondisruptive, new_share_network_id,
new_share_type_id, request_spec=None, filter_properties=None):
preserve_metadata, writable, nondisruptive, preserve_snapshots,
new_share_network_id, new_share_type_id, request_spec=None,
filter_properties=None):
call_context = self.client.prepare(version='1.4')
call_context = self.client.prepare(version='1.7')
request_spec_p = jsonutils.to_primitive(request_spec)
return call_context.cast(
context, 'migrate_share_to_host',
@ -96,6 +98,7 @@ class SchedulerAPI(object):
preserve_metadata=preserve_metadata,
writable=writable,
nondisruptive=nondisruptive,
preserve_snapshots=preserve_snapshots,
new_share_network_id=new_share_network_id,
new_share_type_id=new_share_type_id,
request_spec=request_spec_p,


@ -226,7 +226,7 @@ class API(base.Base):
'is_public': is_public,
'consistency_group_id': consistency_group_id,
}
options.update(self._get_share_attributes_from_share_type(share_type))
options.update(self.get_share_attributes_from_share_type(share_type))
if cgsnapshot_member:
options['source_cgsnapshot_member_id'] = cgsnapshot_member['id']
@ -259,7 +259,7 @@ class API(base.Base):
return share
def _get_share_attributes_from_share_type(self, share_type):
def get_share_attributes_from_share_type(self, share_type):
"""Determine share attributes from the share type.
The share type can change any time after shares of that type are
@ -579,7 +579,7 @@ class API(base.Base):
'scheduled_at': timeutils.utcnow(),
})
share_data.update(
self._get_share_attributes_from_share_type(share_type))
self.get_share_attributes_from_share_type(share_type))
LOG.debug("Manage: Found shares %s.", len(shares))
@ -1049,10 +1049,20 @@ class API(base.Base):
def migration_start(
self, context, share, dest_host, force_host_assisted_migration,
preserve_metadata=True, writable=True, nondisruptive=False,
preserve_metadata, writable, nondisruptive, preserve_snapshots,
new_share_network=None, new_share_type=None):
"""Migrates share to a new host."""
if force_host_assisted_migration and (
preserve_metadata or writable or nondisruptive or
preserve_snapshots):
msg = _('Invalid parameter combination. Cannot set parameters '
'"nondisruptive", "writable", "preserve_snapshots" or '
'"preserve_metadata" to True when enabling the '
'"force_host_assisted_migration" option.')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
share_instance = share.instance
# NOTE(gouthamr): Ensure share does not have replicas.
@ -1071,30 +1081,32 @@ class API(base.Base):
'instance_status': share_instance['status']}
raise exception.InvalidShare(reason=msg)
# Access rules status must not be error
if share_instance['access_rules_status'] == constants.STATUS_ERROR:
msg = _('Share instance %(instance_id)s access rules status must '
'not be in %(error)s when attempting to start a '
'migration.') % {
'instance_id': share_instance['id'],
'error': constants.STATUS_ERROR}
raise exception.InvalidShare(reason=msg)
self._check_is_share_busy(share)
# Make sure the destination host is different than the current one
if dest_host == share_instance['host']:
msg = _('Destination host %(dest_host)s must be different '
'than the current host %(src_host)s.') % {
'dest_host': dest_host,
'src_host': share_instance['host']}
raise exception.InvalidHost(reason=msg)
# We only handle shares without snapshots for now
snaps = self.db.share_snapshot_get_all_for_share(context, share['id'])
if force_host_assisted_migration:
# We only handle shares without snapshots for
# host-assisted migration
snaps = self.db.share_snapshot_get_all_for_share(context,
share['id'])
if snaps:
msg = _("Share %s must not have snapshots.") % share['id']
raise exception.InvalidShare(reason=msg)
msg = _("Share %s must not have snapshots when using "
"host-assisted migration.") % share['id']
raise exception.Conflict(err=msg)
dest_host_host = share_utils.extract_host(dest_host)
# Make sure the host is in the list of available hosts
utils.validate_service_host(context, dest_host_host)
service = self.db.service_get_by_args(
context, dest_host_host, 'manila-share')
if new_share_type:
share_type = new_share_type
new_share_type_id = new_share_type['id']
@ -1132,6 +1144,30 @@ class API(base.Base):
new_share_network_id = None
# Make sure the destination is different than the source
if (new_share_network_id == share_instance['share_network_id'] and
new_share_type_id == share_instance['share_type_id'] and
dest_host == share_instance['host']):
msg = _LI("Destination host (%(dest_host)s), share network "
"(%(dest_sn)s) or share type (%(dest_st)s) are the same "
"as the current host's '%(src_host)s', '%(src_sn)s' and "
"'%(src_st)s' respectively. Nothing to be done.") % {
'dest_host': dest_host,
'dest_sn': new_share_network_id,
'dest_st': new_share_type_id,
'src_host': share_instance['host'],
'src_sn': share_instance['share_network_id'],
'src_st': share_instance['share_type_id'],
}
LOG.info(msg)
self.db.share_update(
context, share['id'],
{'task_state': constants.TASK_STATE_MIGRATION_SUCCESS})
return 200
service = self.db.service_get_by_args(
context, dest_host_host, 'manila-share')
request_spec = self._get_request_spec_dict(
share,
share_type,
@ -1147,8 +1183,10 @@ class API(base.Base):
self.scheduler_rpcapi.migrate_share_to_host(
context, share['id'], dest_host, force_host_assisted_migration,
preserve_metadata, writable, nondisruptive, new_share_network_id,
new_share_type_id, request_spec)
preserve_metadata, writable, nondisruptive, preserve_snapshots,
new_share_network_id, new_share_type_id, request_spec)
return 202
def migration_complete(self, context, share):


@ -355,18 +355,22 @@ class ShareDriver(object):
'writable': True,
'preserve_metadata': True,
'nondisruptive': True,
'preserve_snapshots': True,
}
"""
return {
'compatible': False,
'writable': False,
'preserve_metadata': False,
'nondisruptive': False,
'preserve_snapshots': False,
}
def migration_start(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
source_snapshots, snapshot_mappings, share_server=None,
destination_share_server=None):
"""Starts migration of a given share to another host.
.. note::
@ -382,6 +386,9 @@ class ShareDriver(object):
:param source_share: Reference to the original share model.
:param destination_share: Reference to the share model to be used by
migrated share.
:param source_snapshots: List of snapshots owned by the source share.
:param snapshot_mappings: Mapping of source snapshot IDs to
destination snapshot models.
:param share_server: Share server model or None.
:param destination_share_server: Destination Share server model or
None.
@ -389,8 +396,9 @@ class ShareDriver(object):
raise NotImplementedError()
def migration_continue(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Continues migration of a given share to another host.
.. note::
@ -404,6 +412,9 @@ class ShareDriver(object):
:param source_share: Reference to the original share model.
:param destination_share: Reference to the share model to be used by
migrated share.
:param source_snapshots: List of snapshots owned by the source share.
:param snapshot_mappings: Mapping of source snapshot IDs to
destination snapshot models.
:param share_server: Share server model or None.
:param destination_share_server: Destination Share server model or
None.
@ -412,8 +423,9 @@ class ShareDriver(object):
raise NotImplementedError()
def migration_complete(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Completes migration of a given share to another host.
.. note::
@ -428,16 +440,54 @@ class ShareDriver(object):
:param source_share: Reference to the original share model.
:param destination_share: Reference to the share model to be used by
migrated share.
:param source_snapshots: List of snapshots owned by the source share.
:param snapshot_mappings: Mapping of source snapshot IDs to
destination snapshot models.
:param share_server: Share server model or None.
:param destination_share_server: Destination Share server model or
None.
:return: List of export locations to update the share with.
:return: If the migration changes the export locations or snapshot
provider locations, this method should return a dictionary containing
a list of export locations and a dictionary of model updates for the
snapshots, indexed by their IDs.
Example::
{
'export_locations':
[
{
'path': '1.2.3.4:/foo',
'metadata': {},
'is_admin_only': False
},
{
'path': '5.6.7.8:/foo',
'metadata': {},
'is_admin_only': True
},
],
'snapshot_updates':
{
'bc4e3b28-0832-4168-b688-67fdc3e9d408':
{
'provider_location': '/snapshots/foo/bar_1'
},
'2e62b7ea-4e30-445f-bc05-fd523ca62941':
{
'provider_location': '/snapshots/foo/bar_2'
},
},
}
"""
raise NotImplementedError()
def migration_cancel(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Cancels migration of a given share to another host.
.. note::
@ -450,6 +500,9 @@ class ShareDriver(object):
:param source_share: Reference to the original share model.
:param destination_share: Reference to the share model to be used by
migrated share.
:param source_snapshots: List of snapshots owned by the source share.
:param snapshot_mappings: Mapping of source snapshot IDs to
destination snapshot models.
:param share_server: Share server model or None.
:param destination_share_server: Destination Share server model or
None.
@ -457,8 +510,9 @@ class ShareDriver(object):
raise NotImplementedError()
def migration_get_progress(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Obtains progress of migration of a given share to another host.
.. note::
@ -466,10 +520,14 @@ class ShareDriver(object):
If possible, driver can implement a way to return migration progress
information.
:param context: The 'context.RequestContext' object for the request.
:param source_share: Reference to the original share model.
:param destination_share: Reference to the share model to be used by
migrated share.
:param source_snapshots: List of snapshots owned by the source share.
:param snapshot_mappings: Mapping of source snapshot IDs to
destination snapshot models.
:param share_server: Share server model or None.
:param destination_share_server: Destination Share server model or
None.
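
To make the updated interface concrete, here is a minimal driver sketch
(illustrative only, not part of this change; the class name and returned
values are made up) showing the new migration signatures and the
dictionary expected from migration_complete:

    from manila.share import driver


    class SketchMigrationDriver(driver.ShareDriver):
        """Illustrative stub for the Ocata migration driver interface."""

        def migration_check_compatibility(
                self, context, source_share, destination_share,
                share_server=None, destination_share_server=None):
            # Advertise snapshot preservation only if the backend can
            # copy snapshots along with the share data.
            return {
                'compatible': True,
                'writable': False,
                'preserve_metadata': True,
                'nondisruptive': False,
                'preserve_snapshots': True,
            }

        def migration_start(
                self, context, source_share, destination_share,
                source_snapshots, snapshot_mappings, share_server=None,
                destination_share_server=None):
            # Begin the backend-specific copy of the share and its
            # snapshots (omitted in this sketch).
            pass

        def migration_complete(
                self, context, source_share, destination_share,
                source_snapshots, snapshot_mappings, share_server=None,
                destination_share_server=None):
            # Return the new export locations plus one model update per
            # destination snapshot instance, keyed by its ID.
            snapshot_updates = {
                dest['id']: {'provider_location':
                             '/snapshots/%s' % dest['id']}
                for dest in snapshot_mappings.values()
            }
            return {
                'export_locations': [
                    {'path': '1.2.3.4:/foo',
                     'metadata': {},
                     'is_admin_only': False},
                ],
                'snapshot_updates': snapshot_updates,
            }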


@ -1372,8 +1372,9 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
@ensure_share_server_not_provided
def migration_start(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Is called to start share migration."""
src_dataset_name = self.private_storage.get(
@ -1431,8 +1432,9 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
@ensure_share_server_not_provided
def migration_continue(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Is called in source share's backend to continue migration."""
snapshot_tag = self.private_storage.get(
@ -1456,8 +1458,9 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
@ensure_share_server_not_provided
def migration_complete(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Is called to perform 2nd phase of driver migration of a given share.
"""
@ -1490,12 +1493,13 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
# Destroy src share and temporary migration snapshot on src (this) host
self.delete_share(context, source_share)
return export_locations
return {'export_locations': export_locations}
@ensure_share_server_not_provided
def migration_cancel(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Is called to cancel driver migration."""
src_dataset_name = self.private_storage.get(


@ -189,7 +189,7 @@ def add_hooks(f):
class ShareManager(manager.SchedulerDependentManager):
"""Manages NAS storages."""
RPC_API_VERSION = '1.14'
RPC_API_VERSION = '1.15'
def __init__(self, share_driver=None, service_name=None, *args, **kwargs):
"""Load the driver from args, or from flags."""
@ -293,11 +293,6 @@ class ShareManager(manager.SchedulerDependentManager):
for share_instance in share_instances:
share_ref = self.db.share_get(ctxt, share_instance['share_id'])
if (share_ref['task_state'] == (
constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) and
share_instance['status'] == constants.STATUS_MIGRATING):
continue
if share_ref.is_busy:
LOG.info(
_LI("Share instance %(id)s: skipping export, "
@ -673,8 +668,8 @@ class ShareManager(manager.SchedulerDependentManager):
def _migration_start_driver(
self, context, share_ref, src_share_instance, dest_host, writable,
preserve_metadata, nondisruptive, new_share_network_id, new_az_id,
new_share_type_id):
preserve_metadata, nondisruptive, preserve_snapshots,
new_share_network_id, new_az_id, new_share_type_id):
share_server = self._get_share_server(context, src_share_instance)
@ -744,6 +739,55 @@ class ShareManager(manager.SchedulerDependentManager):
"remaining writable.") % share_ref['id']
raise exception.ShareMigrationFailed(reason=msg)
if (not compatibility.get('preserve_snapshots') and
preserve_snapshots):
msg = _("Driver cannot perform migration of share %s while "
"preserving snapshots.") % share_ref['id']
raise exception.ShareMigrationFailed(reason=msg)
snapshot_mapping = {}
src_snap_instances = []
src_snapshots = self.db.share_snapshot_get_all_for_share(
context, share_ref['id'])
if compatibility.get('preserve_snapshots'):
# Make sure all snapshots are 'available'
if any(x['status'] != constants.STATUS_AVAILABLE
for x in src_snapshots):
msg = _(
"All snapshots must have '%(status)s' status to be "
"migrated by the driver along with share "
"%(share)s.") % {
'share': share_ref['id'],
'status': constants.STATUS_AVAILABLE
}
raise exception.ShareMigrationFailed(reason=msg)
src_snap_instances = [x.instance for x in src_snapshots]
dest_snap_instance_data = {
'status': constants.STATUS_MIGRATING_TO,
'progress': '0%',
'share_instance_id': dest_share_instance['id'],
}
for snap_instance in src_snap_instances:
snapshot_mapping[snap_instance['id']] = (
self.db.share_snapshot_instance_create(
context, snap_instance['snapshot_id'],
dest_snap_instance_data))
self.db.share_snapshot_instance_update(
context, snap_instance['id'],
{'status': constants.STATUS_MIGRATING})
else:
if src_snapshots:
msg = _("Driver does not support preserving snapshots, "
"driver-assisted migration cannot proceed while "
"share %s has snapshots.") % share_ref['id']
raise exception.ShareMigrationFailed(reason=msg)
if not compatibility.get('writable'):
self._cast_access_rules_to_readonly(
context, src_share_instance, share_server)
@ -758,7 +802,8 @@ class ShareManager(manager.SchedulerDependentManager):
self.driver.migration_start(
context, src_share_instance, dest_share_instance,
share_server, dest_share_server)
src_snap_instances, snapshot_mapping, share_server,
dest_share_server)
self.db.share_update(
context, share_ref['id'],
@ -770,6 +815,8 @@ class ShareManager(manager.SchedulerDependentManager):
# database. It is assumed that driver cleans up leftovers in
# backend when migration fails.
self._migration_delete_instance(context, dest_share_instance['id'])
self._restore_migrating_snapshots_status(
context, src_share_instance['id'])
# NOTE(ganso): For now source share instance should remain in
# migrating status for host-assisted migration.
@ -810,6 +857,7 @@ class ShareManager(manager.SchedulerDependentManager):
instances = self.db.share_instances_get_all_by_host(context, self.host)
for instance in instances:
if instance['status'] != constants.STATUS_MIGRATING:
continue
@ -834,10 +882,15 @@ class ShareManager(manager.SchedulerDependentManager):
dest_share_server = self._get_share_server(
context, dest_share_instance)
src_snap_instances, snapshot_mappings = (
self._get_migrating_snapshots(context, src_share_instance,
dest_share_instance))
try:
finished = self.driver.migration_continue(
context, src_share_instance, dest_share_instance,
src_snap_instances, snapshot_mappings,
src_share_server, dest_share_server)
if finished:
@ -867,7 +920,8 @@ class ShareManager(manager.SchedulerDependentManager):
# up leftovers in backend when migration fails.
self._migration_delete_instance(
context, dest_share_instance['id'])
self._restore_migrating_snapshots_status(
context, src_share_instance['id'])
self.db.share_instance_update(
context, src_share_instance['id'],
{'status': constants.STATUS_AVAILABLE})
@ -878,10 +932,54 @@ class ShareManager(manager.SchedulerDependentManager):
"failed.") % share['id']
LOG.exception(msg)
def _get_migrating_snapshots(
self, context, src_share_instance, dest_share_instance):
dest_snap_instances = (
self.db.share_snapshot_instance_get_all_with_filters(
context,
{'share_instance_ids': [dest_share_instance['id']]}))
snapshot_mappings = {}
src_snap_instances = []
if len(dest_snap_instances) > 0:
src_snap_instances = (
self.db.share_snapshot_instance_get_all_with_filters(
context,
{'share_instance_ids': [src_share_instance['id']]}))
for snap in src_snap_instances:
dest_snap_instance = next(
x for x in dest_snap_instances
if snap['snapshot_id'] == x['snapshot_id'])
snapshot_mappings[snap['id']] = dest_snap_instance
return src_snap_instances, snapshot_mappings
def _restore_migrating_snapshots_status(
self, context, src_share_instance_id,
errored_dest_instance_id=None):
filters = {'share_instance_ids': [src_share_instance_id]}
status = constants.STATUS_AVAILABLE
if errored_dest_instance_id:
filters['share_instance_ids'].append(errored_dest_instance_id)
status = constants.STATUS_ERROR
snap_instances = (
self.db.share_snapshot_instance_get_all_with_filters(
context, filters)
)
for instance in snap_instances:
if instance['status'] == constants.STATUS_MIGRATING:
self.db.share_snapshot_instance_update(
context, instance['id'], {'status': status})
elif (errored_dest_instance_id and
instance['status'] == constants.STATUS_MIGRATING_TO):
self.db.share_snapshot_instance_update(
context, instance['id'], {'status': status})
@utils.require_driver_initialized
def migration_start(
self, context, share_id, dest_host, force_host_assisted_migration,
preserve_metadata=True, writable=True, nondisruptive=False,
preserve_metadata, writable, nondisruptive, preserve_snapshots,
new_share_network_id=None, new_share_type_id=None):
"""Migrates a share from current host to another host."""
LOG.debug("Entered migration_start method for share %s.", share_id)
@ -904,8 +1002,8 @@ class ShareManager(manager.SchedulerDependentManager):
try:
success = self._migration_start_driver(
context, share_ref, share_instance, dest_host, writable,
preserve_metadata, nondisruptive, new_share_network_id,
new_az_id, new_share_type_id)
preserve_metadata, nondisruptive, preserve_snapshots,
new_share_network_id, new_az_id, new_share_type_id)
except Exception as e:
if not isinstance(e, NotImplementedError):
@ -916,15 +1014,24 @@ class ShareManager(manager.SchedulerDependentManager):
try:
if not success:
if writable or preserve_metadata or nondisruptive:
if (writable or preserve_metadata or nondisruptive or
preserve_snapshots):
msg = _("Migration for share %s could not be "
"performed because host-assisted migration is not "
"allowed when share must remain writable, "
"preserve all file metadata or be performed "
"non-disruptively.") % share_id
"preserve snapshots and/or file metadata or be "
"performed nondisruptively.") % share_id
raise exception.ShareMigrationFailed(reason=msg)
# We only handle shares without snapshots for now
snaps = self.db.share_snapshot_get_all_for_share(
context, share_id)
if snaps:
msg = _("Share %s must not have snapshots in order to "
"perform a host-assisted migration.") % share_id
raise exception.ShareMigrationFailed(reason=msg)
LOG.debug("Starting host-assisted migration "
"for share %s.", share_id)
@ -1018,13 +1125,27 @@ class ShareManager(manager.SchedulerDependentManager):
context, share_ref['id'],
{'task_state': constants.TASK_STATE_MIGRATION_COMPLETING})
export_locations = self.driver.migration_complete(
context, src_share_instance, dest_share_instance,
share_server, dest_share_server)
src_snap_instances, snapshot_mappings = (
self._get_migrating_snapshots(context, src_share_instance,
dest_share_instance))
if export_locations:
data_updates = self.driver.migration_complete(
context, src_share_instance, dest_share_instance,
src_snap_instances, snapshot_mappings, share_server,
dest_share_server) or {}
if data_updates.get('export_locations'):
self.db.share_export_locations_update(
context, dest_share_instance['id'], export_locations)
context, dest_share_instance['id'],
data_updates['export_locations'])
snapshot_updates = data_updates.get('snapshot_updates') or {}
for src_snap_ins, dest_snap_ins in snapshot_mappings.items():
model_update = snapshot_updates.get(dest_snap_ins['id']) or {}
model_update['status'] = constants.STATUS_AVAILABLE
model_update['progress'] = '100%'
self.db.share_snapshot_instance_update(
context, dest_snap_ins['id'], model_update)
helper = migration.ShareMigrationHelper(
context, self.db, share_ref, self.access_helper)
@ -1055,6 +1176,12 @@ class ShareManager(manager.SchedulerDependentManager):
self.access_helper.delete_share_instance_access_rules(
context, rules, instance_id)
snap_instances = self.db.share_snapshot_instance_get_all_with_filters(
context, {'share_instance_ids': [instance_id]})
for instance in snap_instances:
self.db.share_snapshot_instance_delete(context, instance['id'])
self.db.share_instance_delete(context, instance_id)
LOG.info(_LI("Share instance %s: deleted successfully."),
instance_id)
@ -1086,9 +1213,20 @@ class ShareManager(manager.SchedulerDependentManager):
msg = _("Driver migration completion failed for"
" share %s.") % share_ref['id']
LOG.exception(msg)
# NOTE(ganso): If driver fails during migration-complete,
# all instances are set to error and it is up to the admin
# to fix the problem to either complete migration
# manually or clean it up. At this moment, data
# preservation at the source backend cannot be
# guaranteed.
self._restore_migrating_snapshots_status(
context, src_share_instance['id'],
errored_dest_instance_id=dest_share_instance['id'])
self.db.share_instance_update(
context, src_instance_id,
{'status': constants.STATUS_AVAILABLE})
{'status': constants.STATUS_ERROR})
self.db.share_instance_update(
context, dest_instance_id,
{'status': constants.STATUS_ERROR})
@ -1113,9 +1251,24 @@ class ShareManager(manager.SchedulerDependentManager):
{'status': constants.STATUS_AVAILABLE})
raise exception.ShareMigrationFailed(reason=msg)
self._update_migrated_share_model(
context, share_ref['id'], dest_share_instance)
LOG.info(_LI("Share Migration for share %s"
" completed successfully."), share_ref['id'])
def _update_migrated_share_model(
self, context, share_id, dest_share_instance):
share_type = share_types.get_share_type(
context, dest_share_instance['share_type_id'])
share_api = api.API()
update = share_api.get_share_attributes_from_share_type(share_type)
self.db.share_update(context, share_id, update)
def _migration_complete_host_assisted(self, context, share_ref,
src_instance_id, dest_instance_id):
@ -1223,11 +1376,19 @@ class ShareManager(manager.SchedulerDependentManager):
helper.cleanup_access_rules(src_share_instance, share_server)
else:
src_snap_instances, snapshot_mappings = (
self._get_migrating_snapshots(context, src_share_instance,
dest_share_instance))
self.driver.migration_cancel(
context, src_share_instance, dest_share_instance,
share_server, dest_share_server)
src_snap_instances, snapshot_mappings, share_server,
dest_share_server)
self._migration_delete_instance(context, dest_share_instance['id'])
self._restore_migrating_snapshots_status(
context, src_share_instance['id'])
self.db.share_update(
context, share_ref['id'],
@ -1268,9 +1429,14 @@ class ShareManager(manager.SchedulerDependentManager):
dest_share_server = self.db.share_server_get(
context, dest_share_instance['share_server_id'])
src_snap_instances, snapshot_mappings = (
self._get_migrating_snapshots(context, src_share_instance,
dest_share_instance))
return self.driver.migration_get_progress(
context, src_share_instance, dest_share_instance,
share_server, dest_share_server)
src_snap_instances, snapshot_mappings, share_server,
dest_share_server)
def _get_share_instance(self, context, share):
if isinstance(share, six.string_types):


@ -67,6 +67,8 @@ class ShareAPI(object):
migration_get_info() to connection_get_info()
1.13 - Introduce share revert to snapshot: revert_to_snapshot()
1.14 - Add update_access() and remove allow_access() and deny_access().
1.15 - Updated migration_start() method with new parameter
"preserve_snapshots"
"""
BASE_RPC_API_VERSION = '1.0'
@ -75,7 +77,7 @@ class ShareAPI(object):
super(ShareAPI, self).__init__()
target = messaging.Target(topic=CONF.share_topic,
version=self.BASE_RPC_API_VERSION)
self.client = rpc.get_client(target, version_cap='1.14')
self.client = rpc.get_client(target, version_cap='1.15')
def create_share_instance(self, context, share_instance, host,
request_spec, filter_properties,
@ -138,10 +140,10 @@ class ShareAPI(object):
def migration_start(self, context, share, dest_host,
force_host_assisted_migration, preserve_metadata,
writable, nondisruptive, new_share_network_id,
new_share_type_id):
writable, nondisruptive, preserve_snapshots,
new_share_network_id, new_share_type_id):
new_host = utils.extract_host(share['instance']['host'])
call_context = self.client.prepare(server=new_host, version='1.12')
call_context = self.client.prepare(server=new_host, version='1.15')
call_context.cast(
context,
'migration_start',
@ -151,6 +153,7 @@ class ShareAPI(object):
preserve_metadata=preserve_metadata,
writable=writable,
nondisruptive=nondisruptive,
preserve_snapshots=preserve_snapshots,
new_share_network_id=new_share_network_id,
new_share_type_id=new_share_type_id)


@ -550,7 +550,7 @@ class ShareAPITest(test.TestCase):
share_network = db_utils.create_share_network()
share_type = {'share_type_id': 'fake_type_id'}
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
use_admin_context=True, version='2.29')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
@ -564,13 +564,18 @@ class ShareAPITest(test.TestCase):
body = {
'migration_start': {
'host': 'fake_host',
'preserve_metadata': True,
'preserve_snapshots': True,
'writable': True,
'nondisruptive': True,
'new_share_network_id': 'fake_net_id',
'new_share_type_id': 'fake_type_id',
}
}
method = 'migration_start'
self.mock_object(share_api.API, 'migration_start')
self.mock_object(share_api.API, 'migration_start',
mock.Mock(return_value=202))
self.mock_object(share_api.API, 'get', mock.Mock(return_value=share))
response = getattr(self.controller, method)(req, share['id'], body)
@ -579,22 +584,32 @@ class ShareAPITest(test.TestCase):
share_api.API.get.assert_called_once_with(context, share['id'])
share_api.API.migration_start.assert_called_once_with(
context, share, 'fake_host', False, True, True, False,
context, share, 'fake_host', False, True, True, True, True,
new_share_network=share_network, new_share_type=share_type)
db.share_network_get.assert_called_once_with(
context, 'fake_net_id')
db.share_type_get.assert_called_once_with(
context, 'fake_type_id')
def test_migration_start_has_replicas(self):
def test_migration_start_conflict(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request = api_version.APIVersionRequest('2.22')
req.api_version_request = api_version.APIVersionRequest('2.29')
req.api_version_request.experimental = True
body = {'migration_start': {'host': 'fake_host'}}
body = {
'migration_start': {
'host': 'fake_host',
'preserve_metadata': True,
'preserve_snapshots': True,
'writable': True,
'nondisruptive': True,
}
}
self.mock_object(share_api.API, 'migration_start',
mock.Mock(side_effect=exception.Conflict(err='err')))
@ -602,9 +617,78 @@ class ShareAPITest(test.TestCase):
self.controller.migration_start,
req, share['id'], body)
@ddt.data('nondisruptive', 'writable', 'preserve_metadata',
'preserve_snapshots', 'host', 'body')
def test_migration_start_missing_mandatory(self, param):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True,
version='2.29')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {
'migration_start': {
'host': 'fake_host',
'preserve_metadata': True,
'preserve_snapshots': True,
'writable': True,
'nondisruptive': True,
}
}
if param == 'body':
body.pop('migration_start')
else:
body['migration_start'].pop(param)
method = 'migration_start'
self.mock_object(share_api.API, 'migration_start')
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=share))
self.assertRaises(webob.exc.HTTPBadRequest,
getattr(self.controller, method),
req, 'fake_id', body)
@ddt.data('nondisruptive', 'writable', 'preserve_metadata',
'preserve_snapshots', 'force_host_assisted_migration')
def test_migration_start_non_boolean(self, param):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True,
version='2.29')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {
'migration_start': {
'host': 'fake_host',
'preserve_metadata': True,
'preserve_snapshots': True,
'writable': True,
'nondisruptive': True,
}
}
body['migration_start'][param] = None
method = 'migration_start'
self.mock_object(share_api.API, 'migration_start')
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=share))
self.assertRaises(webob.exc.HTTPBadRequest,
getattr(self.controller, method),
req, 'fake_id', body)
def test_migration_start_no_share_id(self):
req = fakes.HTTPRequest.blank('/shares/%s/action' % 'fake_id',
use_admin_context=True, version='2.22')
use_admin_context=True, version='2.29')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
@ -618,31 +702,22 @@ class ShareAPITest(test.TestCase):
getattr(self.controller, method),
req, 'fake_id', body)
def test_migration_start_no_host(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_start': {}}
method = 'migration_start'
self.assertRaises(webob.exc.HTTPBadRequest,
getattr(self.controller, method),
req, share['id'], body)
def test_migration_start_new_share_network_not_found(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
use_admin_context=True, version='2.29')
context = req.environ['manila.context']
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_start': {'host': 'fake_host',
body = {
'migration_start': {
'host': 'fake_host',
'preserve_metadata': True,
'preserve_snapshots': True,
'writable': True,
'nondisruptive': True,
'new_share_network_id': 'nonexistent'}}
self.mock_object(db, 'share_network_get',
@ -655,13 +730,19 @@ class ShareAPITest(test.TestCase):
def test_migration_start_new_share_type_not_found(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
use_admin_context=True, version='2.29')
context = req.environ['manila.context']
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_start': {'host': 'fake_host',
body = {
'migration_start': {
'host': 'fake_host',
'preserve_metadata': True,
'preserve_snapshots': True,
'writable': True,
'nondisruptive': True,
'new_share_type_id': 'nonexistent'}}
self.mock_object(db, 'share_type_get',
@ -674,7 +755,7 @@ class ShareAPITest(test.TestCase):
def test_migration_start_invalid_force_host_assisted_migration(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
use_admin_context=True, version='2.29')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
@ -692,7 +773,7 @@ class ShareAPITest(test.TestCase):
self, parameter):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
use_admin_context=True, version='2.29')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True


@ -234,13 +234,13 @@ class SchedulerManagerTestCase(test.TestCase):
self.assertRaises(
TypeError, self.manager.migrate_share_to_host,
self.context, share['id'], 'fake@backend#pool', False, True, True,
False, 'fake_net_id', 'fake_type_id', {}, None)
False, True, 'fake_net_id', 'fake_type_id', {}, None)
db.share_get.assert_called_once_with(self.context, share['id'])
base.Scheduler.host_passes_filters.assert_called_once_with(
self.context, 'fake@backend#pool', {}, None)
share_rpcapi.ShareAPI.migration_start.assert_called_once_with(
self.context, share, host.host, False, True, True, False,
self.context, share, host.host, False, True, True, False, True,
'fake_net_id', 'fake_type_id')
@ddt.data(exception.NoValidHost(reason='fake'), TypeError)
@ -262,7 +262,7 @@ class SchedulerManagerTestCase(test.TestCase):
self.assertRaises(
capture, self.manager.migrate_share_to_host,
self.context, share['id'], host, False, True, True, False,
self.context, share['id'], host, False, True, True, False, True,
'fake_net_id', 'fake_type_id', request_spec, None)
base.Scheduler.host_passes_filters.assert_called_once_with(


@ -110,11 +110,12 @@ class SchedulerRpcAPITestCase(test.TestCase):
preserve_metadata=True,
writable=True,
nondisruptive=False,
preserve_snapshots=True,
new_share_network_id='fake_net_id',
new_share_type_id='fake_type_id',
request_spec='fake_request_spec',
filter_properties='filter_properties',
version='1.4')
version='1.7')
def test_create_share_replica(self):
self._test_scheduler_api('create_share_replica',


@ -38,7 +38,9 @@ from oslo_log import log
from manila.common import constants
from manila import exception
from manila.i18n import _
from manila.share import configuration
from manila.share import driver
from manila.share.manager import share_manager_opts # noqa
from manila.share import utils as share_utils
LOG = log.getLogger(__name__)
@ -104,6 +106,27 @@ def slow_me_down(f):
return wrapped_func
def get_backend_configuration(backend_name):
config_stanzas = CONF.list_all_sections()
if backend_name not in config_stanzas:
msg = _("Could not find backend stanza %(backend_name)s in "
"configuration which is required for share replication and "
"migration. Available stanzas are %(stanzas)s")
params = {
"stanzas": config_stanzas,
"backend_name": backend_name,
}
raise exception.BadConfigurationException(reason=msg % params)
config = configuration.Configuration(
driver.share_opts, config_group=backend_name)
config.append_config_values(dummy_opts)
config.append_config_values(share_manager_opts)
config.append_config_values(driver.ssh_opts)
return config
class DummyDriver(driver.ShareDriver):
"""Dummy share driver that implements all share driver interfaces."""
@ -477,17 +500,23 @@ class DummyDriver(driver.ShareDriver):
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
"""Is called to test compatibility with destination backend."""
backend_name = share_utils.extract_host(
destination_share['host'], level='backend_name')
config = get_backend_configuration(backend_name)
compatible = 'Dummy' in config.share_driver
return {
'compatible': True,
'writable': True,
'preserve_metadata': True,
'nondisruptive': True,
'compatible': compatible,
'writable': compatible,
'preserve_metadata': compatible,
'nondisruptive': False,
'preserve_snapshots': compatible,
}
@slow_me_down
def migration_start(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Is called to perform 1st phase of driver migration of a given share.
"""
@ -498,8 +527,9 @@ class DummyDriver(driver.ShareDriver):
@slow_me_down
def migration_continue(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
if source_share["id"] not in self.migration_progress:
self.migration_progress[source_share["id"]] = 0
@ -515,34 +545,45 @@ class DummyDriver(driver.ShareDriver):
@slow_me_down
def migration_complete(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Is called to perform 2nd phase of driver migration of a given share.
"""
return self._do_migration(source_share, share_server)
snapshot_updates = {}
for src_snap_ins, dest_snap_ins in snapshot_mappings.items():
snapshot_updates[dest_snap_ins['id']] = self._create_snapshot(
dest_snap_ins)
return {
'snapshot_updates': snapshot_updates,
'export_locations': self._do_migration(
source_share, destination_share, share_server)
}
def _do_migration(self, share_ref, share_server):
share_name = self._get_share_name(share_ref)
def _do_migration(self, source_share_ref, dest_share_ref, share_server):
share_name = self._get_share_name(dest_share_ref)
mountpoint = "/path/to/fake/share/%s" % share_name
self.private_storage.delete(source_share_ref["id"])
self.private_storage.update(
share_ref["id"], {
dest_share_ref["id"], {
"fake_provider_share_name": share_name,
"fake_provider_location": mountpoint,
}
)
LOG.debug(
"Migration of dummy share with ID '%s' has been completed." %
share_ref["id"])
self.migration_progress.pop(share_ref["id"], None)
source_share_ref["id"])
self.migration_progress.pop(source_share_ref["id"], None)
return self._generate_export_locations(
mountpoint, share_server=share_server)
@slow_me_down
def migration_cancel(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Is called to cancel driver migration."""
LOG.debug(
"Migration of dummy share with ID '%s' has been canceled." %
@ -551,8 +592,9 @@ class DummyDriver(driver.ShareDriver):
@slow_me_down
def migration_get_progress(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Is called to get migration progress."""
# Simulate migration progress.
if source_share["id"] not in self.migration_progress:


@ -2229,7 +2229,7 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
with mock.patch("six.moves.builtins.open",
mock.mock_open(read_data="data")) as mock_file:
self.driver.migration_start(
self._context, src_share, dst_share)
self._context, src_share, dst_share, None, None)
expected_file_content = (
'ssh %(ssh_cmd)s sudo zfs send -vDR %(snap)s | '
@ -2281,7 +2281,7 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
mock.Mock(return_value=('fake_out', 'fake_err')))
result = self.driver.migration_continue(
self._context, 'fake_src_share', dst_share)
self._context, 'fake_src_share', dst_share, None, None)
self.assertTrue(result)
mock_executor.assert_called_once_with(dst_share['host'])
@ -2310,7 +2310,7 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
mock.Mock(return_value=('foo@%s' % snapshot_tag, 'fake_err')))
result = self.driver.migration_continue(
self._context, 'fake_src_share', dst_share)
self._context, 'fake_src_share', dst_share, None, None)
self.assertIsNone(result)
self.assertFalse(mock_executor.called)
@ -2340,7 +2340,7 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
self.assertRaises(
exception.ZFSonLinuxException,
self.driver.migration_continue,
self._context, 'fake_src_share', dst_share,
self._context, 'fake_src_share', dst_share, None, None
)
mock_executor.assert_called_once_with(dst_share['host'])
@ -2379,10 +2379,14 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
self.mock_object(self.driver, 'delete_share')
result = self.driver.migration_complete(
self._context, src_share, dst_share)
self._context, src_share, dst_share, None, None)
self.assertEqual(
mock_helper.return_value.create_exports.return_value, result)
expected_result = {
'export_locations': (mock_helper.return_value.
create_exports.return_value)
}
self.assertEqual(expected_result, result)
mock_executor.assert_called_once_with(dst_share['host'])
self.driver.execute.assert_called_once_with(
'sudo', 'zfs', 'destroy', dst_snapshot_name,
@ -2428,7 +2432,8 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
mock.Mock(return_value=(ps_output, 'fake_err'))
)
self.driver.migration_cancel(self._context, src_share, dst_share)
self.driver.migration_cancel(
self._context, src_share, dst_share, [], {})
self.driver.execute.assert_has_calls([
mock.call('ps', 'aux'),
@ -2470,7 +2475,8 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
mock.Mock(side_effect=exception.ProcessExecutionError),
)
self.driver.migration_cancel(self._context, src_share, dst_share)
self.driver.migration_cancel(
self._context, src_share, dst_share, [], {})
self.driver.execute.assert_has_calls([
mock.call('ps', 'aux'),


@ -754,14 +754,14 @@ class ShareAPITestCase(test.TestCase):
}
}
result = self.api._get_share_attributes_from_share_type(share_type)
result = self.api.get_share_attributes_from_share_type(share_type)
self.assertEqual(share_type['extra_specs'], result)
@ddt.data({}, {'extra_specs': {}}, None)
def test_get_share_attributes_from_share_type_defaults(self, share_type):
result = self.api._get_share_attributes_from_share_type(share_type)
result = self.api.get_share_attributes_from_share_type(share_type)
expected = {
'snapshot_support': False,
@ -776,7 +776,7 @@ class ShareAPITestCase(test.TestCase):
def test_get_share_attributes_from_share_type_invalid(self, share_type):
self.assertRaises(exception.InvalidExtraSpec,
self.api._get_share_attributes_from_share_type,
self.api.get_share_attributes_from_share_type,
share_type)
@ddt.data('dr', 'readable', None)
@ -886,11 +886,11 @@ class ShareAPITestCase(test.TestCase):
share_type['extra_specs']['snapshot_support']),
'create_share_from_snapshot_support': kwargs.get(
'create_share_from_snapshot_support',
share_type['extra_specs']
['create_share_from_snapshot_support']),
share_type['extra_specs'].get(
'create_share_from_snapshot_support')),
'revert_to_snapshot_support': kwargs.get(
'revert_to_snapshot_support',
share_type['extra_specs']['revert_to_snapshot_support']),
share_type['extra_specs'].get('revert_to_snapshot_support')),
'share_proto': kwargs.get('share_proto', share.get('share_proto')),
'share_type_id': share_type['id'],
'is_public': kwargs.get('is_public', share.get('is_public')),
@ -2287,14 +2287,15 @@ class ShareAPITestCase(test.TestCase):
mock.Mock(return_value=service))
if share_type:
self.api.migration_start(self.context, share, host, True, True,
True, True, share_network, fake_type_2)
self.api.migration_start(self.context, share, host, False, True,
True, True, True, share_network,
fake_type_2)
else:
self.api.migration_start(self.context, share, host, True, True,
True, True, share_network, None)
self.api.migration_start(self.context, share, host, False, True,
True, True, True, share_network, None)
self.scheduler_rpcapi.migrate_share_to_host.assert_called_once_with(
self.context, share['id'], host, True, True, True, True,
self.context, share['id'], host, False, True, True, True, True,
share_network_id, fake_type_2['id'], request_spec)
if not share_type:
share_types.get_share_type.assert_called_once_with(
@ -2310,10 +2311,31 @@ class ShareAPITestCase(test.TestCase):
self.context, share.instance['id'],
{'status': constants.STATUS_MIGRATING})
@ddt.data({'force_host_assisted': True, 'writable': True,
'preserve_metadata': False, 'preserve_snapshots': False,
'nondisruptive': False},
{'force_host_assisted': True, 'writable': False,
'preserve_metadata': True, 'preserve_snapshots': False,
'nondisruptive': False},
{'force_host_assisted': True, 'writable': False,
'preserve_metadata': False, 'preserve_snapshots': True,
'nondisruptive': False},
{'force_host_assisted': True, 'writable': False,
'preserve_metadata': False, 'preserve_snapshots': False,
'nondisruptive': True})
@ddt.unpack
def test_migration_start_invalid_host_and_driver_assisted_params(
self, force_host_assisted, writable, preserve_metadata,
preserve_snapshots, nondisruptive):
self.assertRaises(
exception.InvalidInput, self.api.migration_start, self.context,
'some_share', 'some_host', force_host_assisted, preserve_metadata,
writable, preserve_snapshots, nondisruptive)
@ddt.data(True, False)
def test_migration_start_invalid_share_network_type_combo(self, dhss):
host = 'fake2@backend#pool'
service = {'availability_zone_id': 'fake_az_id'}
share_network = None
if not dhss:
share_network = db_utils.create_share_network(id='fake_net_id')
@ -2339,17 +2361,14 @@ class ShareAPITestCase(test.TestCase):
host='fake@backend#pool', share_type_id=fake_type['id'])
self.mock_object(utils, 'validate_service_host')
self.mock_object(db_api, 'service_get_by_args',
mock.Mock(return_value=service))
self.assertRaises(
exception.InvalidInput, self.api.migration_start, self.context,
share, host, True, True, True, True, share_network, fake_type_2)
share, host, False, True, True, True, True, share_network,
fake_type_2)
utils.validate_service_host.assert_called_once_with(
self.context, 'fake2@backend')
db_api.service_get_by_args.assert_called_once_with(
self.context, 'fake2@backend', 'manila-share')
def test_migration_start_status_unavailable(self):
host = 'fake2@backend#pool'
@ -2357,7 +2376,22 @@ class ShareAPITestCase(test.TestCase):
status=constants.STATUS_ERROR)
self.assertRaises(exception.InvalidShare, self.api.migration_start,
self.context, share, host, True)
self.context, share, host, False, True, True, True,
True)
def test_migration_start_access_rules_status_error(self):
host = 'fake2@backend#pool'
instance = db_utils.create_share_instance(
share_id='fake_share_id',
access_rules_status=constants.STATUS_ERROR,
status=constants.STATUS_AVAILABLE)
share = db_utils.create_share(
id='fake_share_id',
instances=[instance])
self.assertRaises(exception.InvalidShare, self.api.migration_start,
self.context, share, host, False, True, True, True,
True)
def test_migration_start_task_state_invalid(self):
host = 'fake2@backend#pool'
@ -2367,17 +2401,51 @@ class ShareAPITestCase(test.TestCase):
self.assertRaises(exception.ShareBusyException,
self.api.migration_start,
self.context, share, host, True)
self.context, share, host, False, True, True, True,
True)
def test_migration_start_with_snapshots(self):
def test_migration_start_host_assisted_with_snapshots(self):
host = 'fake2@backend#pool'
share = db_utils.create_share(
host='fake@backend#pool', status=constants.STATUS_AVAILABLE)
self.mock_object(db_api, 'share_snapshot_get_all_for_share',
mock.Mock(return_value=True))
self.assertRaises(exception.InvalidShare, self.api.migration_start,
self.context, share, host, True)
self.assertRaises(exception.Conflict, self.api.migration_start,
self.context, share, host, True, False, False, False,
False)
def test_migration_start_with_snapshots(self):
host = 'fake2@backend#pool'
fake_type = {
'id': 'fake_type_id',
'extra_specs': {
'snapshot_support': True,
'driver_handles_share_servers': False,
},
}
service = {'availability_zone_id': 'fake_az_id'}
self.mock_object(db_api, 'service_get_by_args',
mock.Mock(return_value=service))
self.mock_object(utils, 'validate_service_host')
self.mock_object(share_types, 'get_share_type',
mock.Mock(return_value=fake_type))
share = db_utils.create_share(
host='fake@backend#pool', status=constants.STATUS_AVAILABLE,
share_type_id=fake_type['id'])
request_spec = self._get_request_spec_dict(
share, fake_type, availability_zone_id='fake_az_id')
self.api.migration_start(self.context, share, host, False, True, True,
True, True)
self.scheduler_rpcapi.migrate_share_to_host.assert_called_once_with(
self.context, share['id'], host, False, True, True, True, True,
None, 'fake_type_id', request_spec)
def test_migration_start_has_replicas(self):
host = 'fake2@backend#pool'
@ -2396,7 +2464,8 @@ class ShareAPITestCase(test.TestCase):
share = db_api.share_get(self.context, share['id'])
self.assertRaises(exception.Conflict, self.api.migration_start,
self.context, share, host, True)
self.context, share, host, False, True, True, True,
True)
self.assertTrue(mock_log.error.called)
self.assertFalse(mock_snapshot_get_call.called)
@ -2410,16 +2479,62 @@ class ShareAPITestCase(test.TestCase):
self.assertRaises(exception.ServiceNotFound,
self.api.migration_start,
self.context, share, host, True)
self.context, share, host, False, True, True, True,
True)
def test_migration_start_same_host(self):
@ddt.data({'dhss': True, 'new_share_network_id': 'fake_net_id',
'new_share_type_id': 'fake_type_id'},
{'dhss': False, 'new_share_network_id': None,
'new_share_type_id': 'fake_type_id'},
{'dhss': True, 'new_share_network_id': 'fake_net_id',
'new_share_type_id': None})
@ddt.unpack
def test_migration_start_same_data_as_source(
self, dhss, new_share_network_id, new_share_type_id):
host = 'fake@backend#pool'
share = db_utils.create_share(
host='fake@backend#pool', status=constants.STATUS_AVAILABLE)
self.assertRaises(exception.InvalidHost,
self.api.migration_start,
self.context, share, host, True)
fake_type_src = {
'id': 'fake_type_id',
'extra_specs': {
'snapshot_support': True,
'driver_handles_share_servers': True,
},
}
new_share_type_param = None
if new_share_type_id:
new_share_type_param = {
'id': new_share_type_id,
'extra_specs': {
'snapshot_support': True,
'driver_handles_share_servers': dhss,
},
}
new_share_net_param = None
if new_share_network_id:
new_share_net_param = db_utils.create_share_network(
id=new_share_network_id)
share = db_utils.create_share(
host='fake@backend#pool', status=constants.STATUS_AVAILABLE,
share_type_id=fake_type_src['id'],
share_network_id=new_share_network_id)
self.mock_object(utils, 'validate_service_host')
self.mock_object(db_api, 'share_update')
self.mock_object(share_types, 'get_share_type',
mock.Mock(return_value=fake_type_src))
result = self.api.migration_start(
self.context, share, host, False, True, True, True, True,
new_share_net_param, new_share_type_param)
self.assertEqual(200, result)
db_api.share_update.assert_called_once_with(
self.context, share['id'],
{'task_state': constants.TASK_STATE_MIGRATION_SUCCESS})
@ddt.data({}, {'replication_type': None})
def test_create_share_replica_invalid_share_type(self, attributes):
@ -2736,7 +2851,6 @@ class ShareAPITestCase(test.TestCase):
db_api.service_get_all_by_topic.assert_called_once_with(
self.context, 'manila-data')
@ddt.unpack
def test_migration_cancel_service_down(self):
service = 'fake_service'
instance1 = db_utils.create_share_instance(
@ -2786,8 +2900,10 @@ class ShareAPITestCase(test.TestCase):
db_api.service_get_by_args.assert_called_once_with(
self.context, instance1['host'], 'manila-share')
@ddt.unpack
def test_migration_cancel_driver_service_down(self):
@ddt.data(constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
constants.TASK_STATE_DATA_COPYING_COMPLETED)
def test_migration_cancel_driver_service_down(self, task_state):
service = 'fake_service'
instance1 = db_utils.create_share_instance(
@ -2799,7 +2915,7 @@ class ShareAPITestCase(test.TestCase):
status=constants.STATUS_MIGRATING_TO)
share = db_utils.create_share(
id='fake_id',
task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
task_state=task_state,
instances=[instance1, instance2])
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=False))
@ -2852,7 +2968,6 @@ class ShareAPITestCase(test.TestCase):
db_api.service_get_all_by_topic.assert_called_once_with(
self.context, 'manila-data')
@ddt.unpack
def test_migration_get_progress_service_down(self):
instance1 = db_utils.create_share_instance(
share_id='fake_id', status=constants.STATUS_MIGRATING)

View File

@ -538,7 +538,7 @@ class ShareDriverTestCase(test.TestCase):
share_driver = driver.ShareDriver(False)
self.assertRaises(NotImplementedError, share_driver.migration_start,
None, None, None, None, None)
None, None, None, None, None, None, None)
def test_migration_continue(self):
@ -546,7 +546,7 @@ class ShareDriverTestCase(test.TestCase):
share_driver = driver.ShareDriver(False)
self.assertRaises(NotImplementedError, share_driver.migration_continue,
None, None, None, None, None,)
None, None, None, None, None, None, None)
def test_migration_complete(self):
@ -554,7 +554,7 @@ class ShareDriverTestCase(test.TestCase):
share_driver = driver.ShareDriver(False)
self.assertRaises(NotImplementedError, share_driver.migration_complete,
None, None, None, None, None)
None, None, None, None, None, None, None)
def test_migration_cancel(self):
@ -562,7 +562,7 @@ class ShareDriverTestCase(test.TestCase):
share_driver = driver.ShareDriver(False)
self.assertRaises(NotImplementedError, share_driver.migration_cancel,
None, None, None, None, None)
None, None, None, None, None, None, None)
def test_migration_get_progress(self):
@ -571,7 +571,7 @@ class ShareDriverTestCase(test.TestCase):
self.assertRaises(NotImplementedError,
share_driver.migration_get_progress,
None, None, None, None, None)
None, None, None, None, None, None, None)
@ddt.data(True, False)
def test_connection_get_info(self, admin):
@ -612,6 +612,7 @@ class ShareDriverTestCase(test.TestCase):
'writable': False,
'preserve_metadata': False,
'nondisruptive': False,
'preserve_snapshots': False,
}
result = share_driver.migration_check_compatibility(

View File

@ -3622,7 +3622,7 @@ class ShareManagerTestCase(test.TestCase):
# run
self.share_manager.migration_start(
self.context, 'fake_id', host, False, False, False, False,
self.context, 'fake_id', host, False, False, False, False, False,
'fake_net_id', 'fake_type_id')
# asserts
@ -3644,7 +3644,7 @@ class ShareManagerTestCase(test.TestCase):
self.share_manager.db.share_update.assert_has_calls(share_update_calls)
self.share_manager._migration_start_driver.assert_called_once_with(
self.context, share, instance, host, False, False, False,
self.context, share, instance, host, False, False, False, False,
'fake_net_id', 'fake_az_id', 'fake_type_id')
if not success:
(self.share_manager._migration_start_host_assisted.
@ -3654,12 +3654,36 @@ class ShareManagerTestCase(test.TestCase):
self.share_manager.db.service_get_by_args.assert_called_once_with(
self.context, 'fake2@backend', 'manila-share')
def test_migration_start_prevent_host_assisted(self):
@ddt.data({'writable': False, 'preserve_metadata': False,
'nondisruptive': False, 'preserve_snapshots': True,
'has_snapshots': False},
{'writable': False, 'preserve_metadata': False,
'nondisruptive': True, 'preserve_snapshots': False,
'has_snapshots': False},
{'writable': False, 'preserve_metadata': True,
'nondisruptive': False, 'preserve_snapshots': False,
'has_snapshots': False},
{'writable': True, 'preserve_metadata': False,
'nondisruptive': False, 'preserve_snapshots': False,
'has_snapshots': False},
{'writable': False, 'preserve_metadata': False,
'nondisruptive': False, 'preserve_snapshots': False,
'has_snapshots': True}
)
@ddt.unpack
def test_migration_start_prevent_host_assisted(
self, writable, preserve_metadata, nondisruptive,
preserve_snapshots, has_snapshots):
share = db_utils.create_share()
instance = share.instance
host = 'fake@backend#pool'
fake_service = {'availability_zone_id': 'fake_az_id'}
if has_snapshots:
snapshot = db_utils.create_snapshot(share_id=share['id'])
self.mock_object(
self.share_manager.db, 'share_snapshot_get_all_for_share',
mock.Mock(return_value=[snapshot]))
# mocks
self.mock_object(self.share_manager.db, 'service_get_by_args',
@ -3672,8 +3696,8 @@ class ShareManagerTestCase(test.TestCase):
# run
self.assertRaises(
exception.ShareMigrationFailed, self.share_manager.migration_start,
self.context, 'share_id', host, True, True, True, True,
'fake_net_id')
self.context, 'share_id', host, True, writable, preserve_metadata,
nondisruptive, preserve_snapshots, 'fake_net_id')
self.share_manager.db.share_update.assert_has_calls([
mock.call(
@ -3720,7 +3744,7 @@ class ShareManagerTestCase(test.TestCase):
self.assertRaises(
exception.ShareMigrationFailed,
self.share_manager.migration_start,
self.context, 'fake_id', host, False, False, False, False,
self.context, 'fake_id', host, False, False, False, False, False,
'fake_net_id', 'fake_type_id')
# asserts
@ -3743,7 +3767,7 @@ class ShareManagerTestCase(test.TestCase):
self.context, instance['id'],
{'status': constants.STATUS_AVAILABLE})
self.share_manager._migration_start_driver.assert_called_once_with(
self.context, share, instance, host, False, False, False,
self.context, share, instance, host, False, False, False, False,
'fake_net_id', 'fake_az_id', 'fake_type_id')
self.share_manager.db.service_get_by_args.assert_called_once_with(
self.context, 'fake2@backend', 'manila-share')
@ -3825,33 +3849,50 @@ class ShareManagerTestCase(test.TestCase):
migration_api.ShareMigrationHelper.\
cleanup_new_instance.assert_called_once_with(new_instance)
@ddt.data({'share_network_id': 'fake_net_id', 'exc': None},
{'share_network_id': None, 'exc': Exception('fake')},
{'share_network_id': None, 'exc': None})
@ddt.data({'share_network_id': 'fake_net_id', 'exc': None,
'has_snapshots': True},
{'share_network_id': None, 'exc': Exception('fake'),
'has_snapshots': True},
{'share_network_id': None, 'exc': None, 'has_snapshots': False})
@ddt.unpack
def test__migration_start_driver(self, exc, share_network_id):
def test__migration_start_driver(
self, exc, share_network_id, has_snapshots):
fake_dest_host = 'fake_host'
src_server = db_utils.create_share_server()
if share_network_id:
dest_server = db_utils.create_share_server()
else:
dest_server = None
share = db_utils.create_share(share_network_id=share_network_id)
share = db_utils.create_share(
id='fake_id',
share_server_id='fake_src_server_id',
share_network_id=share_network_id)
migrating_instance = db_utils.create_share_instance(
share_id='fake_id',
share_network_id=share_network_id)
src_instance = db_utils.create_share_instance(
share_id='fake_id',
share_server_id='fake_src_server_id',
share_network_id=share_network_id)
if has_snapshots:
snapshot = db_utils.create_snapshot(
status=(constants.STATUS_AVAILABLE if not exc
else constants.STATUS_ERROR),
share_id=share['id'])
migrating_snap_instance = db_utils.create_snapshot(
status=constants.STATUS_MIGRATING,
share_id=share['id'])
dest_snap_instance = db_utils.create_snapshot_instance(
status=constants.STATUS_AVAILABLE,
snapshot_id=snapshot['id'],
share_instance_id=migrating_instance['id'])
snapshot_mappings = {snapshot.instance['id']: dest_snap_instance}
else:
snapshot_mappings = {}
src_instance = share.instance
compatibility = {
'compatible': True,
'writable': False,
'preserve_metadata': False,
'non-disruptive': False,
'nondisruptive': False,
'preserve_snapshots': has_snapshots,
}
if exc:
compatibility = exc
# mocks
self.mock_object(self.share_manager.db, 'share_instance_get',
@ -3860,7 +3901,7 @@ class ShareManagerTestCase(test.TestCase):
mock.Mock(return_value=src_server))
self.mock_object(self.share_manager.driver,
'migration_check_compatibility',
mock.Mock(side_effect=[compatibility]))
mock.Mock(return_value=compatibility))
self.mock_object(
api.API, 'create_share_instance_and_get_request_spec',
mock.Mock(return_value=({}, migrating_instance)))
@ -3872,6 +3913,19 @@ class ShareManagerTestCase(test.TestCase):
self.mock_object(
migration_api.ShareMigrationHelper, 'wait_for_share_server',
mock.Mock(return_value=dest_server))
self.mock_object(
self.share_manager.db, 'share_snapshot_get_all_for_share',
mock.Mock(return_value=[snapshot] if has_snapshots else []))
if has_snapshots:
self.mock_object(
self.share_manager.db, 'share_snapshot_instance_create',
mock.Mock(return_value=dest_snap_instance))
self.mock_object(
self.share_manager.db, 'share_snapshot_instance_update')
self.mock_object(
self.share_manager.db,
'share_snapshot_instance_get_all_with_filters',
mock.Mock(return_value=[migrating_snap_instance]))
self.mock_object(self.share_manager.driver, 'migration_start')
self.mock_object(self.share_manager, '_migration_delete_instance')
self.mock_object(self.share_manager.access_helper,
@ -3884,11 +3938,13 @@ class ShareManagerTestCase(test.TestCase):
exception.ShareMigrationFailed,
self.share_manager._migration_start_driver,
self.context, share, src_instance, fake_dest_host, False,
False, False, share_network_id, 'fake_az_id', 'fake_type_id')
False, False, False, share_network_id, 'fake_az_id',
'fake_type_id')
else:
result = self.share_manager._migration_start_driver(
self.context, share, src_instance, fake_dest_host, False,
False, False, share_network_id, 'fake_az_id', 'fake_type_id')
False, False, False, share_network_id, 'fake_az_id',
'fake_type_id')
# asserts
if not exc:
@ -3908,7 +3964,8 @@ class ShareManagerTestCase(test.TestCase):
self.context, src_instance['id'], share_server=src_server))
self.share_manager.driver.migration_start.assert_called_once_with(
self.context, src_instance, migrating_instance,
src_server, dest_server)
[snapshot.instance] if has_snapshots else [],
snapshot_mappings, src_server, dest_server)
self.share_manager.db.share_instance_get.assert_called_once_with(
self.context, migrating_instance['id'], with_share_data=True)
@ -3921,6 +3978,10 @@ class ShareManagerTestCase(test.TestCase):
(self.share_manager.driver.migration_check_compatibility.
assert_called_once_with(self.context, src_instance,
migrating_instance, src_server, dest_server))
(self.share_manager.db.share_snapshot_get_all_for_share.
assert_called_once_with(self.context, share['id']))
if share_network_id:
(rpcapi.ShareAPI.provide_share_server.
assert_called_once_with(
@ -3932,23 +3993,57 @@ class ShareManagerTestCase(test.TestCase):
if exc:
(self.share_manager._migration_delete_instance.
assert_called_once_with(self.context, migrating_instance['id']))
if has_snapshots:
(self.share_manager.db.share_snapshot_instance_update.
assert_called_once_with(
self.context, migrating_snap_instance['id'],
{'status': constants.STATUS_AVAILABLE}))
(self.share_manager.db.
share_snapshot_instance_get_all_with_filters(
self.context,
{'share_instance_ids': [src_instance['id']]}))
else:
if has_snapshots:
snap_data = {
'status': constants.STATUS_MIGRATING_TO,
'progress': '0%',
'share_instance_id': migrating_instance['id'],
}
(self.share_manager.db.share_snapshot_instance_create.
assert_called_once_with(self.context, snapshot['id'],
snap_data))
(self.share_manager.db.share_snapshot_instance_update.
assert_called_once_with(
self.context, snapshot.instance['id'],
{'status': constants.STATUS_MIGRATING}))
self.share_manager.db.share_instance_update.assert_called_once_with(
self.context, migrating_instance['id'],
{'status': constants.STATUS_MIGRATING_TO})
@ddt.data({'writable': False, 'preserve_metadata': True,
'nondisruptive': True, 'compatible': True},
'nondisruptive': True, 'compatible': True,
'preserve_snapshots': True, 'has_snapshots': False},
{'writable': True, 'preserve_metadata': False,
'nondisruptive': True, 'compatible': True},
'nondisruptive': True, 'compatible': True,
'preserve_snapshots': True, 'has_snapshots': False},
{'writable': True, 'preserve_metadata': True,
'nondisruptive': False, 'compatible': True},
'nondisruptive': False, 'compatible': True,
'preserve_snapshots': True, 'has_snapshots': False},
{'writable': True, 'preserve_metadata': True,
'nondisruptive': True, 'compatible': False}
)
'nondisruptive': True, 'compatible': False,
'preserve_snapshots': True, 'has_snapshots': False},
{'writable': True, 'preserve_metadata': True,
'nondisruptive': True, 'compatible': True,
'preserve_snapshots': False, 'has_snapshots': False},
{'writable': True, 'preserve_metadata': True,
'nondisruptive': True, 'compatible': True,
'preserve_snapshots': False, 'has_snapshots': True})
@ddt.unpack
def test__migration_start_driver_not_compatible(
self, compatible, writable, preserve_metadata, nondisruptive):
self, compatible, writable, preserve_metadata, nondisruptive,
preserve_snapshots, has_snapshots):
share = db_utils.create_share()
src_instance = db_utils.create_share_instance(
@ -3965,8 +4060,10 @@ class ShareManagerTestCase(test.TestCase):
'compatible': compatible,
'writable': writable,
'preserve_metadata': preserve_metadata,
'non-disruptive': nondisruptive,
'nondisruptive': nondisruptive,
'preserve_snapshots': preserve_snapshots,
}
snapshot = db_utils.create_snapshot(share_id=share['id'])
# mocks
self.mock_object(self.share_manager.db, 'share_server_get',
@ -3987,13 +4084,17 @@ class ShareManagerTestCase(test.TestCase):
'migration_check_compatibility',
mock.Mock(return_value=compatibility))
self.mock_object(utils, 'wait_for_access_update')
self.mock_object(
self.share_manager.db, 'share_snapshot_get_all_for_share',
mock.Mock(return_value=[snapshot] if has_snapshots else []))
# run
self.assertRaises(
exception.ShareMigrationFailed,
self.share_manager._migration_start_driver,
self.context, share, src_instance, fake_dest_host, True, True,
True, 'fake_net_id', 'fake_az_id', 'fake_new_type_id')
True, not has_snapshots, 'fake_net_id', 'fake_az_id',
'fake_new_type_id')
# asserts
self.share_manager.db.share_server_get.assert_called_once_with(
@ -4017,26 +4118,35 @@ class ShareManagerTestCase(test.TestCase):
@ddt.data(Exception('fake'), False, True)
def test_migration_driver_continue(self, finished):
src_server = db_utils.create_share_server()
dest_server = db_utils.create_share_server()
share = db_utils.create_share(
task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)
task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
id='share_id',
share_server_id=src_server['id'],
status=constants.STATUS_MIGRATING)
share_cancelled = db_utils.create_share(
task_state=constants.TASK_STATE_MIGRATION_CANCELLED)
if finished:
share_cancelled = share
src_server = db_utils.create_share_server()
dest_server = db_utils.create_share_server()
regular_instance = db_utils.create_share_instance(
status=constants.STATUS_AVAILABLE,
share_id='other_id')
src_instance = db_utils.create_share_instance(
share_id='share_id',
share_server_id=src_server['id'],
status=constants.STATUS_MIGRATING)
dest_instance = db_utils.create_share_instance(
share_id='share_id',
host='fake_host',
share_server_id=dest_server['id'],
status=constants.STATUS_MIGRATING)
status=constants.STATUS_MIGRATING_TO)
src_instance = share.instance
snapshot = db_utils.create_snapshot(share_id=share['id'])
dest_snap_instance = db_utils.create_snapshot_instance(
snapshot_id=snapshot['id'],
share_instance_id=dest_instance['id'])
migrating_snap_instance = db_utils.create_snapshot(
status=constants.STATUS_MIGRATING,
share_id=share['id'])
snapshot_mappings = {snapshot.instance['id']: dest_snap_instance}
self.mock_object(manager.LOG, 'warning')
self.mock_object(self.share_manager.db,
@ -4056,10 +4166,29 @@ class ShareManagerTestCase(test.TestCase):
self.mock_object(self.share_manager.db, 'share_instance_update')
self.mock_object(self.share_manager.db, 'share_update')
self.mock_object(self.share_manager, '_migration_delete_instance')
side_effect = [[dest_snap_instance], [snapshot.instance]]
if isinstance(finished, Exception):
side_effect.append([migrating_snap_instance])
self.mock_object(
self.share_manager.db,
'share_snapshot_instance_get_all_with_filters',
mock.Mock(side_effect=side_effect))
self.mock_object(
self.share_manager.db, 'share_snapshot_instance_update')
share_get_calls = [mock.call(self.context, 'share_id')]
self.share_manager.migration_driver_continue(self.context)
snapshot_instance_get_all_calls = [
mock.call(self.context,
{'share_instance_ids': [dest_instance['id']]}),
mock.call(self.context,
{'share_instance_ids': [src_instance['id']]})
]
if isinstance(finished, Exception):
self.share_manager.db.share_update.assert_called_once_with(
self.context, 'share_id',
@ -4069,6 +4198,14 @@ class ShareManagerTestCase(test.TestCase):
{'status': constants.STATUS_AVAILABLE}))
(self.share_manager._migration_delete_instance.
assert_called_once_with(self.context, dest_instance['id']))
(self.share_manager.db.share_snapshot_instance_update.
assert_called_once_with(
self.context, migrating_snap_instance['id'],
{'status': constants.STATUS_AVAILABLE}))
snapshot_instance_get_all_calls.append(
mock.call(
self.context,
{'share_instance_ids': [src_instance['id']]}))
else:
if finished:
@ -4092,7 +4229,10 @@ class ShareManagerTestCase(test.TestCase):
])
self.share_manager.driver.migration_continue.assert_called_once_with(
self.context, src_instance, dest_instance,
src_server, dest_server)
[snapshot.instance], snapshot_mappings, src_server, dest_server)
(self.share_manager.db.share_snapshot_instance_get_all_with_filters.
assert_has_calls(snapshot_instance_get_all_calls))
@ddt.data({'task_state': constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
'exc': None},
@ -4105,20 +4245,32 @@ class ShareManagerTestCase(test.TestCase):
@ddt.unpack
def test_migration_complete(self, task_state, exc):
instance = db_utils.create_share_instance(
instance_1 = db_utils.create_share_instance(
share_id='fake_id',
status=constants.STATUS_AVAILABLE,
share_server_id='fake_server_id')
status=constants.STATUS_MIGRATING,
share_server_id='fake_server_id',
share_type_id='fake_type_id')
instance_2 = db_utils.create_share_instance(
share_id='fake_id',
status=constants.STATUS_MIGRATING_TO,
share_server_id='fake_server_id',
share_type_id='fake_type_id')
share = db_utils.create_share(
id='fake_id',
instances=[instance],
instances=[instance_1, instance_2],
task_state=task_state)
model_type_update = {'create_share_from_snapshot_support': False}
# mocks
self.mock_object(self.share_manager.db, 'share_get',
mock.Mock(return_value=share))
self.mock_object(self.share_manager.db, 'share_instance_get',
mock.Mock(side_effect=[instance, instance]))
mock.Mock(side_effect=[instance_1, instance_2]))
self.mock_object(api.API, 'get_share_attributes_from_share_type',
mock.Mock(return_value=model_type_update))
self.mock_object(share_types, 'get_share_type',
mock.Mock(return_value='fake_type'))
self.mock_object(self.share_manager.db, 'share_update')
if task_state == constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE:
self.mock_object(
@ -4130,54 +4282,96 @@ class ShareManagerTestCase(test.TestCase):
mock.Mock(side_effect=exc))
if exc:
snapshot = db_utils.create_snapshot(share_id=share['id'])
snapshot_ins1 = db_utils.create_snapshot_instance(
snapshot_id=snapshot['id'],
share_instance_id=instance_1['id'],
status=constants.STATUS_MIGRATING,)
snapshot_ins2 = db_utils.create_snapshot_instance(
snapshot_id=snapshot['id'],
share_instance_id=instance_2['id'],
status=constants.STATUS_MIGRATING_TO)
self.mock_object(manager.LOG, 'exception')
self.mock_object(self.share_manager.db, 'share_update')
self.mock_object(self.share_manager.db, 'share_instance_update')
self.mock_object(self.share_manager.db,
'share_snapshot_instance_update')
self.mock_object(self.share_manager.db,
'share_snapshot_instance_get_all_with_filters',
mock.Mock(
return_value=[snapshot_ins1, snapshot_ins2]))
self.assertRaises(
exception.ShareMigrationFailed,
self.share_manager.migration_complete,
self.context, 'fake_ins_id', 'new_fake_ins_id')
self.context, instance_1['id'], instance_2['id'])
else:
self.share_manager.migration_complete(
self.context, 'fake_ins_id', 'new_fake_ins_id')
self.context, instance_1['id'], instance_2['id'])
# asserts
self.share_manager.db.share_get.assert_called_once_with(
self.context, share['id'])
self.share_manager.db.share_instance_get.assert_has_calls([
mock.call(self.context, 'fake_ins_id', with_share_data=True),
mock.call(self.context, 'new_fake_ins_id', with_share_data=True)])
mock.call(self.context, instance_1['id'], with_share_data=True),
mock.call(self.context, instance_2['id'], with_share_data=True)])
if task_state == constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE:
(self.share_manager._migration_complete_driver.
assert_called_once_with(self.context, share, instance, instance))
assert_called_once_with(
self.context, share, instance_1, instance_2))
else:
(self.share_manager._migration_complete_host_assisted.
assert_called_once_with(
self.context, share, 'fake_ins_id', 'new_fake_ins_id'))
self.context, share, instance_1['id'], instance_2['id']))
if exc:
self.assertTrue(manager.LOG.exception.called)
self.share_manager.db.share_update.assert_called_once_with(
self.context, share['id'],
{'task_state': constants.TASK_STATE_MIGRATION_ERROR})
share_instance_update_calls = [
mock.call(self.context, 'fake_ins_id',
{'status': constants.STATUS_AVAILABLE})
]
if task_state == constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE:
share_instance_update_calls.append(
mock.call(self.context, 'new_fake_ins_id',
share_instance_update_calls = [
mock.call(self.context, instance_1['id'],
{'status': constants.STATUS_ERROR}),
mock.call(self.context, instance_2['id'],
{'status': constants.STATUS_ERROR})
)
]
else:
share_instance_update_calls = [
mock.call(self.context, instance_1['id'],
{'status': constants.STATUS_AVAILABLE}),
]
self.share_manager.db.share_instance_update.assert_has_calls(
share_instance_update_calls)
if task_state == constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE:
(self.share_manager.db.share_snapshot_instance_update.
assert_has_calls([
mock.call(self.context, snapshot_ins1['id'],
{'status': constants.STATUS_ERROR}),
mock.call(self.context, snapshot_ins2['id'],
{'status': constants.STATUS_ERROR})]))
(self.share_manager.db.
share_snapshot_instance_get_all_with_filters.
assert_called_once_with(
self.context, {
'share_instance_ids': [instance_1['id'],
instance_2['id']]
}
))
else:
(api.API.get_share_attributes_from_share_type.
assert_called_once_with('fake_type'))
share_types.get_share_type.assert_called_once_with(
self.context, 'fake_type_id')
self.share_manager.db.share_update.assert_called_once_with(
self.context, share['id'], model_type_update)
@ddt.data(constants.TASK_STATE_DATA_COPYING_ERROR,
constants.TASK_STATE_DATA_COPYING_CANCELLED,
constants.TASK_STATE_DATA_COPYING_COMPLETED,
'other')
def test__migration_complete_status(self, status):
def test__migration_complete_host_assisted_status(self, status):
instance = db_utils.create_share_instance(
share_id='fake_id',
@ -4242,18 +4436,30 @@ class ShareManagerTestCase(test.TestCase):
fake_src_host = 'src_host'
fake_dest_host = 'dest_host'
fake_rules = 'fake_rules'
fake_export_locations = 'fake_export_locations'
src_server = db_utils.create_share_server()
dest_server = db_utils.create_share_server()
share = db_utils.create_share()
share = db_utils.create_share(
share_server_id='fake_src_server_id',
host=fake_src_host)
dest_instance = db_utils.create_share_instance(
share_id=share['id'],
share_server_id='fake_dest_server_id',
host=fake_dest_host)
src_instance = db_utils.create_share_instance(
share_id=share['id'],
share_server_id='fake_src_server_id',
host=fake_src_host)
src_instance = share.instance
snapshot = db_utils.create_snapshot(share_id=share['id'])
dest_snap_instance = db_utils.create_snapshot_instance(
snapshot_id=snapshot['id'],
share_instance_id=dest_instance['id'])
snapshot_mappings = {snapshot.instance['id']: dest_snap_instance}
fake_return_data = {
'export_locations': 'fake_export_locations',
'snapshot_updates': {dest_snap_instance['id']: {
'fake_keys': 'fake_values'},
},
}
# mocks
self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(
@ -4264,7 +4470,7 @@ class ShareManagerTestCase(test.TestCase):
self.mock_object(
self.share_manager.db, 'share_export_locations_update')
self.mock_object(self.share_manager.driver, 'migration_complete',
mock.Mock(return_value=fake_export_locations))
mock.Mock(return_value=fake_return_data))
self.mock_object(
self.share_manager.access_helper, '_check_needs_refresh',
mock.Mock(return_value=True))
@ -4273,6 +4479,13 @@ class ShareManagerTestCase(test.TestCase):
self.mock_object(self.share_manager, '_migration_delete_instance')
self.mock_object(migration_api.ShareMigrationHelper,
'apply_new_access_rules')
self.mock_object(
self.share_manager.db,
'share_snapshot_instance_get_all_with_filters',
mock.Mock(side_effect=[[dest_snap_instance],
[snapshot.instance]]))
self.mock_object(
self.share_manager.db, 'share_snapshot_instance_update')
# run
self.share_manager._migration_complete_driver(
@ -4284,9 +4497,10 @@ class ShareManagerTestCase(test.TestCase):
mock.call(self.context, 'fake_dest_server_id')])
(self.share_manager.db.share_export_locations_update.
assert_called_once_with(self.context, dest_instance['id'],
fake_export_locations))
'fake_export_locations'))
self.share_manager.driver.migration_complete.assert_called_once_with(
self.context, src_instance, dest_instance, src_server, dest_server)
self.context, src_instance, dest_instance, [snapshot.instance],
snapshot_mappings, src_server, dest_server)
(migration_api.ShareMigrationHelper.apply_new_access_rules.
assert_called_once_with(dest_instance))
self.share_manager._migration_delete_instance.assert_called_once_with(
@ -4301,6 +4515,23 @@ class ShareManagerTestCase(test.TestCase):
mock.call(
self.context, dest_instance['share_id'],
{'task_state': constants.TASK_STATE_MIGRATION_SUCCESS})])
(self.share_manager.db.share_snapshot_instance_get_all_with_filters.
assert_has_calls([
mock.call(self.context,
{'share_instance_ids': [dest_instance['id']]}),
mock.call(self.context,
{'share_instance_ids': [src_instance['id']]})]))
snap_data_update = (
fake_return_data['snapshot_updates'][dest_snap_instance['id']])
snap_data_update.update({
'status': constants.STATUS_AVAILABLE,
'progress': '100%',
})
(self.share_manager.db.share_snapshot_instance_update.
assert_called_once_with(self.context, dest_snap_instance['id'],
snap_data_update))
def test__migration_complete_host_assisted(self):
@ -4378,6 +4609,8 @@ class ShareManagerTestCase(test.TestCase):
self.mock_object(db, 'share_update')
self.mock_object(db, 'share_instance_update')
self.mock_object(self.share_manager, '_migration_delete_instance')
self.mock_object(self.share_manager,
'_restore_migrating_snapshots_status')
self.mock_object(db, 'share_server_get',
mock.Mock(side_effect=[server_1, server_2]))
self.mock_object(self.share_manager.driver, 'migration_cancel')
@ -4402,10 +4635,13 @@ class ShareManagerTestCase(test.TestCase):
else:
self.share_manager.driver.migration_cancel.assert_called_once_with(
self.context, instance_1, instance_2, server_1, server_2)
self.context, instance_1, instance_2, [], {}, server_1,
server_2)
(self.share_manager._migration_delete_instance.
assert_called_once_with(self.context, instance_2['id']))
(self.share_manager._restore_migrating_snapshots_status.
assert_called_once_with(self.context, instance_1['id']))
self.share_manager.db.share_get.assert_called_once_with(
self.context, share['id'])
@ -4430,7 +4666,9 @@ class ShareManagerTestCase(test.TestCase):
def test__migration_delete_instance(self):
instance = db_utils.create_share_instance(share_id='fake_id')
share = db_utils.create_share(id='fake_id')
instance = share.instance
snapshot = db_utils.create_snapshot(share_id=share['id'])
rules = [{'id': 'rule_id_1'}, {'id': 'rule_id_2'}]
# mocks
@ -4447,6 +4685,11 @@ class ShareManagerTestCase(test.TestCase):
self.mock_object(self.share_manager.db, 'share_instance_delete')
self.mock_object(self.share_manager.db, 'share_instance_access_delete')
self.mock_object(self.share_manager, '_check_delete_share_server')
self.mock_object(self.share_manager.db,
'share_snapshot_instance_delete')
self.mock_object(self.share_manager.db,
'share_snapshot_instance_get_all_with_filters',
mock.Mock(return_value=[snapshot.instance]))
# run
self.share_manager._migration_delete_instance(
@ -4466,6 +4709,11 @@ class ShareManagerTestCase(test.TestCase):
self.context, instance['id'])
self.share_manager._check_delete_share_server.assert_called_once_with(
self.context, instance)
(self.share_manager.db.share_snapshot_instance_get_all_with_filters.
assert_called_once_with(self.context,
{'share_instance_ids': [instance['id']]}))
(self.share_manager.db.share_snapshot_instance_delete.
assert_called_once_with(self.context, snapshot.instance['id']))
def test_migration_cancel_invalid(self):
@ -4512,7 +4760,8 @@ class ShareManagerTestCase(test.TestCase):
(self.share_manager.driver.migration_get_progress.
assert_called_once_with(
self.context, instance_1, instance_2, server_1, server_2))
self.context, instance_1, instance_2, [], {}, server_1,
server_2))
self.share_manager.db.share_get.assert_called_once_with(
self.context, share['id'])

View File

@ -243,13 +243,14 @@ class ShareRpcAPITestCase(test.TestCase):
def test_migration_start(self):
self._test_share_api('migration_start',
rpc_method='cast',
version='1.12',
version='1.15',
share=self.fake_share,
dest_host=self.fake_host,
force_host_assisted_migration=True,
preserve_metadata=True,
writable=True,
nondisruptive=False,
preserve_snapshots=True,
new_share_network_id='fake_net_id',
new_share_type_id='fake_type_id')

View File

@ -483,6 +483,32 @@ def get_bool_from_api_params(key, params, default=False, strict=True):
return param
def check_params_exist(keys, params):
"""Validates if keys exist in params.
:param keys: List of keys to check
:param params: Parameters received from REST API
"""
if any(set(keys) - set(params)):
msg = _("Must specify all mandatory parameters: %s") % keys
raise exc.HTTPBadRequest(explanation=msg)
def check_params_are_boolean(keys, params, default=False):
"""Validates if keys in params are boolean.
:param keys: List of keys to check
:param params: Parameters received from REST API
:param default: default value to use when a key is not present
:return: a dictionary mapping each key to its retrieved boolean value
"""
result = {}
for key in keys:
value = get_bool_from_api_params(key, params, default, strict=True)
result[key] = value
return result
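A minimal usage sketch of the two helpers above, assuming they are invoked from the migration_start API layer on the raw request parameters; the 'params' dict and the key list below are illustrative only and are not part of this change:

# Illustrative only: validate the now-mandatory driver-assisted booleans.
driver_assisted_keys = ['writable', 'preserve_metadata',
                        'nondisruptive', 'preserve_snapshots']
params = {'writable': 'true', 'preserve_metadata': 'false',
          'nondisruptive': 'false', 'preserve_snapshots': 'false'}

# Raises HTTPBadRequest if any mandatory key is missing.
check_params_exist(driver_assisted_keys, params)

# Converts the string values to booleans, raising HTTPBadRequest on
# anything that is not a valid boolean (strict=True is used internally).
bool_params = check_params_are_boolean(driver_assisted_keys, params)
# bool_params == {'writable': True, 'preserve_metadata': False,
#                 'nondisruptive': False, 'preserve_snapshots': False}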
def require_driver_initialized(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):

View File

@ -30,7 +30,7 @@ ShareGroup = [
help="The minimum api microversion is configured to be the "
"value of the minimum microversion supported by Manila."),
cfg.StrOpt("max_api_microversion",
default="2.28",
default="2.29",
help="The maximum api microversion is configured to be the "
"value of the latest microversion supported by Manila."),
cfg.StrOpt("region",

View File

@ -1092,8 +1092,9 @@ class SharesV2Client(shares_client.SharesClient):
def migrate_share(self, share_id, host,
force_host_assisted_migration=False,
new_share_network_id=None, writable=False,
preserve_metadata=False, nondisruptive=False,
new_share_type_id=None, version=LATEST_MICROVERSION):
preserve_metadata=False, preserve_snapshots=False,
nondisruptive=False, new_share_type_id=None,
version=LATEST_MICROVERSION):
body = {
'migration_start': {
@ -1103,6 +1104,7 @@ class SharesV2Client(shares_client.SharesClient):
'new_share_type_id': new_share_type_id,
'writable': writable,
'preserve_metadata': preserve_metadata,
'preserve_snapshots': preserve_snapshots,
'nondisruptive': nondisruptive,
}
}

View File

@ -36,7 +36,8 @@ class MigrationNFSTest(base.BaseSharesAdminTest):
1) Driver-assisted migration: force_host_assisted_migration, nondisruptive,
writable and preserve-metadata are False.
2) Host-assisted migration: force_host_assisted_migration is True,
nondisruptive, writable and preserve-metadata are False.
nondisruptive, writable, preserve-metadata and preserve-snapshots are
False.
3) 2-phase migration of both Host-assisted and Driver-assisted.
4) Cancelling migration past first phase.
5) Changing driver modes through migration.
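To make scenarios 1 and 2 above concrete, here is a hedged sketch of the corresponding client calls, using the migrate_share signature this commit adds to SharesV2Client; 'client', 'share_id' and 'dest_host' are placeholders:

# Scenario 2, host-assisted: force the data-copy fallback; every
# driver-assisted guarantee is requested as False.
client.migrate_share(share_id, dest_host,
                     force_host_assisted_migration=True,
                     writable=False, preserve_metadata=False,
                     nondisruptive=False, preserve_snapshots=False)

# Scenario 1, driver-assisted: same mandatory booleans, but the driver
# is allowed to perform the migration itself.
client.migrate_share(share_id, dest_host,
                     force_host_assisted_migration=False,
                     writable=False, preserve_metadata=False,
                     nondisruptive=False, preserve_snapshots=False)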
@ -75,7 +76,7 @@ class MigrationNFSTest(base.BaseSharesAdminTest):
variation='opposite_driver_modes'))
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.22")
@base.skip_if_microversion_lt("2.29")
@ddt.data(True, False)
def test_migration_cancel(self, force_host_assisted):
@ -112,7 +113,7 @@ class MigrationNFSTest(base.BaseSharesAdminTest):
complete=False)
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.22")
@base.skip_if_microversion_lt("2.29")
@ddt.data(True, False)
def test_migration_opposite_driver_modes(self, force_host_assisted):
@ -172,7 +173,7 @@ class MigrationNFSTest(base.BaseSharesAdminTest):
share_type_id=new_share_type_id)
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.22")
@base.skip_if_microversion_lt("2.29")
@ddt.data(True, False)
def test_migration_2phase(self, force_host_assisted):

View File

@ -30,7 +30,7 @@ CONF = config.CONF
@ddt.ddt
class MigrationTest(base.BaseSharesAdminTest):
class MigrationNegativeTest(base.BaseSharesAdminTest):
"""Tests Share Migration.
Tests share migration in multi-backend environment.
@ -40,7 +40,7 @@ class MigrationTest(base.BaseSharesAdminTest):
@classmethod
def resource_setup(cls):
super(MigrationTest, cls).resource_setup()
super(MigrationNegativeTest, cls).resource_setup()
if cls.protocol not in CONF.share.enable_protocols:
message = "%s tests are disabled." % cls.protocol
raise cls.skipException(message)
@ -57,11 +57,11 @@ class MigrationTest(base.BaseSharesAdminTest):
cls.share = cls.create_share(cls.protocol)
cls.share = cls.shares_client.get_share(cls.share['id'])
default_type = cls.shares_v2_client.list_share_types(
cls.default_type = cls.shares_v2_client.list_share_types(
default=True)['share_type']
dest_pool = utils.choose_matching_backend(
cls.share, pools, default_type)
cls.share, pools, cls.default_type)
if not dest_pool or dest_pool.get('name') is None:
raise share_exceptions.ShareMigrationException(
@ -121,44 +121,65 @@ class MigrationTest(base.BaseSharesAdminTest):
'invalid_share_id')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(CONF.share.run_snapshot_tests,
"Snapshot tests are disabled.")
def test_migrate_share_with_snapshot(self):
snap = self.create_snapshot_wait_for_active(self.share['id'])
self.assertRaises(
lib_exc.BadRequest, self.shares_v2_client.migrate_share,
self.share['id'], self.dest_pool)
lib_exc.Conflict, self.shares_v2_client.migrate_share,
self.share['id'], self.dest_pool,
force_host_assisted_migration=True)
self.shares_client.delete_snapshot(snap['id'])
self.shares_client.wait_for_resource_deletion(snapshot_id=snap["id"])
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
def test_migrate_share_same_host(self):
self.assertRaises(
lib_exc.BadRequest, self.shares_v2_client.migrate_share,
self.share['id'], self.share['host'])
@base.skip_if_microversion_lt("2.29")
@ddt.data(True, False)
def test_migrate_share_same_host(self, specified):
new_share_type_id = None
new_share_network_id = None
if specified:
new_share_type_id = self.default_type['id']
new_share_network_id = self.share['share_network_id']
self.migrate_share(
self.share['id'], self.share['host'],
wait_for_status=constants.TASK_STATE_MIGRATION_SUCCESS,
new_share_type_id=new_share_type_id,
new_share_network_id=new_share_network_id)
# NOTE(ganso): No need to assert, it is already waiting for correct
# status (migration_success).
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
@base.skip_if_microversion_lt("2.29")
def test_migrate_share_host_invalid(self):
self.assertRaises(
lib_exc.NotFound, self.shares_v2_client.migrate_share,
self.share['id'], 'invalid_host')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
def test_migrate_share_host_assisted_not_allowed(self):
self.shares_v2_client.migrate_share(
@base.skip_if_microversion_lt("2.29")
@ddt.data({'writable': False, 'preserve_metadata': False,
'preserve_snapshots': False, 'nondisruptive': True},
{'writable': False, 'preserve_metadata': False,
'preserve_snapshots': True, 'nondisruptive': False},
{'writable': False, 'preserve_metadata': True,
'preserve_snapshots': False, 'nondisruptive': False},
{'writable': True, 'preserve_metadata': False,
'preserve_snapshots': False, 'nondisruptive': False})
@ddt.unpack
def test_migrate_share_host_assisted_not_allowed_API(
self, writable, preserve_metadata, preserve_snapshots,
nondisruptive):
self.assertRaises(
lib_exc.BadRequest, self.shares_v2_client.migrate_share,
self.share['id'], self.dest_pool,
force_host_assisted_migration=True, writable=True,
preserve_metadata=True)
self.shares_v2_client.wait_for_migration_status(
self.share['id'], self.dest_pool,
constants.TASK_STATE_MIGRATION_ERROR)
force_host_assisted_migration=True, writable=writable,
preserve_metadata=preserve_metadata, nondisruptive=nondisruptive,
preserve_snapshots=preserve_snapshots)
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
@base.skip_if_microversion_lt("2.29")
def test_migrate_share_change_type_no_valid_host(self):
if not CONF.share.multitenancy_enabled:
new_share_network_id = self.create_share_network(
@ -176,14 +197,14 @@ class MigrationTest(base.BaseSharesAdminTest):
constants.TASK_STATE_MIGRATION_ERROR)
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
@base.skip_if_microversion_lt("2.29")
def test_migrate_share_not_found(self):
self.assertRaises(
lib_exc.NotFound, self.shares_v2_client.migrate_share,
'invalid_share_id', self.dest_pool)
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
@base.skip_if_microversion_lt("2.29")
def test_migrate_share_not_available(self):
self.shares_client.reset_state(self.share['id'],
constants.STATUS_ERROR)
@ -198,7 +219,7 @@ class MigrationTest(base.BaseSharesAdminTest):
constants.STATUS_AVAILABLE)
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
@base.skip_if_microversion_lt("2.29")
def test_migrate_share_invalid_share_network(self):
self.assertRaises(
lib_exc.BadRequest, self.shares_v2_client.migrate_share,
@ -206,7 +227,7 @@ class MigrationTest(base.BaseSharesAdminTest):
new_share_network_id='invalid_net_id')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
@base.skip_if_microversion_lt("2.29")
def test_migrate_share_invalid_share_type(self):
self.assertRaises(
lib_exc.BadRequest, self.shares_v2_client.migrate_share,
@ -214,7 +235,7 @@ class MigrationTest(base.BaseSharesAdminTest):
new_share_type_id='invalid_type_id')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
@base.skip_if_microversion_lt("2.29")
def test_migrate_share_opposite_type_share_network_invalid(self):
extra_specs = utils.get_configured_extra_specs(

View File

@ -424,14 +424,17 @@ class BaseSharesTest(test.BaseTestCase):
@classmethod
def migrate_share(
cls, share_id, dest_host, wait_for_status, client=None,
force_host_assisted_migration=False, new_share_network_id=None,
force_host_assisted_migration=False, writable=False,
nondisruptive=False, preserve_metadata=False,
preserve_snapshots=False, new_share_network_id=None,
new_share_type_id=None, **kwargs):
client = client or cls.shares_v2_client
client.migrate_share(
share_id, dest_host,
force_host_assisted_migration=force_host_assisted_migration,
writable=writable, preserve_metadata=preserve_metadata,
nondisruptive=nondisruptive, preserve_snapshots=preserve_snapshots,
new_share_network_id=new_share_network_id,
writable=False, preserve_metadata=False, nondisruptive=False,
new_share_type_id=new_share_type_id, **kwargs)
share = client.wait_for_migration_status(
share_id, dest_host, wait_for_status, **kwargs)

View File

@ -212,10 +212,13 @@ class ShareScenarioTest(manager.NetworkScenarioTest):
return linux_client
def _migrate_share(self, share_id, dest_host, status, client=None):
def _migrate_share(self, share_id, dest_host, status, force_host_assisted,
client=None):
client = client or self.shares_admin_v2_client
client.migrate_share(share_id, dest_host, writable=False,
preserve_metadata=False, nondisruptive=False)
client.migrate_share(
share_id, dest_host, writable=False, preserve_metadata=False,
nondisruptive=False, preserve_snapshots=False,
force_host_assisted_migration=force_host_assisted)
share = client.wait_for_migration_status(share_id, dest_host, status)
return share

View File

@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from oslo_log import log as logging
from tempest.common import waiters
from tempest import config
@ -32,6 +34,7 @@ CONF = config.CONF
LOG = logging.getLogger(__name__)
@ddt.ddt
class ShareBasicOpsBase(manager.ShareScenarioTest):
"""This smoke test case follows this basic set of operations:
@ -135,12 +138,15 @@ class ShareBasicOpsBase(manager.ShareScenarioTest):
data = ssh_client.exec_command("sudo cat /mnt/t1")
return data.rstrip()
def migrate_share(self, share_id, dest_host, status):
share = self._migrate_share(share_id, dest_host, status,
def migrate_share(self, share_id, dest_host, status, force_host_assisted):
share = self._migrate_share(
share_id, dest_host, status, force_host_assisted,
self.shares_admin_v2_client)
share = self._migration_complete(share['id'], dest_host)
return share
def migration_complete(self, share_id, dest_host):
return self._migration_complete(share_id, dest_host)
def create_share_network(self):
self.net = self._create_network(namestart="manila-share")
self.subnet = self._create_subnet(network=self.net,
@ -266,10 +272,21 @@ class ShareBasicOpsBase(manager.ShareScenarioTest):
self.assertEqual(test_data, data)
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(CONF.share.run_host_assisted_migration_tests or
CONF.share.run_driver_assisted_migration_tests,
"Share migration tests are disabled.")
def test_migration_files(self):
@ddt.data(True, False)
def test_migration_files(self, force_host_assisted):
if (force_host_assisted and
not CONF.share.run_host_assisted_migration_tests):
raise self.skipException("Host-assisted migration tests are "
"disabled.")
elif (not force_host_assisted and
not CONF.share.run_driver_assisted_migration_tests):
raise self.skipException("Driver-assisted migration tests are "
"disabled.")
if self.protocol != "nfs":
raise self.skipException("Only NFS protocol supported "
@ -300,9 +317,6 @@ class ShareBasicOpsBase(manager.ShareScenarioTest):
ssh_client = self.init_ssh(instance)
self.provide_access_to_auxiliary_instance(instance)
if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
exports = self.share['export_locations']
else:
exports = self.shares_v2_client.list_share_export_locations(
self.share['id'])
self.assertNotEmpty(exports)
@ -330,18 +344,26 @@ class ShareBasicOpsBase(manager.ShareScenarioTest):
ssh_client.exec_command("sudo chmod -R 555 /mnt/f3")
ssh_client.exec_command("sudo chmod -R 777 /mnt/f4")
self.umount_share(ssh_client)
task_state = (constants.TASK_STATE_DATA_COPYING_COMPLETED,
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)
task_state = (constants.TASK_STATE_DATA_COPYING_COMPLETED
if force_host_assisted
else constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)
self.share = self.migrate_share(
self.share['id'], dest_pool, task_state)
self.share['id'], dest_pool, task_state, force_host_assisted)
read_only = False
if force_host_assisted:
try:
ssh_client.exec_command(
"dd if=/dev/zero of=/mnt/f1/1m6.bin bs=1M count=1")
except Exception:
read_only = True
self.assertTrue(read_only)
self.umount_share(ssh_client)
self.share = self.migration_complete(self.share['id'], dest_pool)
if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
new_exports = self.share['export_locations']
self.assertNotEmpty(new_exports)
else:
new_exports = self.shares_v2_client.list_share_export_locations(
self.share['id'])
self.assertNotEmpty(new_exports)

View File

@ -0,0 +1,26 @@
---
prelude: >
    The share migration feature has been improved to support
    migrating snapshots where possible and to provide a more
    deterministic user experience.
features:
- Added the 'preserve_snapshots' parameter to the share migration API.
upgrade:
- All share migration driver-assisted API parameters are
now mandatory.
- Improvements to the share migration API have been qualified
    against the driver-assisted migration support that exists in the
    ZFSOnLinux driver. However, this driver does not currently
    support preserving snapshots during migration.
- The snapshot restriction in the share migration API has been
    changed to return an error only when the parameter
    force-host-assisted-migration is True.
deprecations:
- Support for the experimental share migration APIs has been
dropped for API microversions prior to 2.29.
fixes:
- Added a check to ensure that host-assisted migration cannot
    be forced while driver-assisted migration options are specified.
- The share migration API can only be invoked when at least one
    of the parameters host, share network or share type is expected
    to change.
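For reference, a sketch of the migration_start request body that the updated tempest client builds at microversion 2.29; the key names follow the client change earlier in this commit, while the concrete values and the keys hidden by the diff hunk (host, force_host_assisted_migration, new_share_network_id) are assumptions:

body = {
    'migration_start': {
        'host': 'dest@backend#pool',            # assumed destination format
        'force_host_assisted_migration': False,
        'new_share_network_id': None,
        'new_share_type_id': None,
        'writable': False,
        'preserve_metadata': False,
        'preserve_snapshots': False,
        'nondisruptive': False,
    }
}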