Fix improper Share Migration behavior for drivers

Tempest tests were not appropriate for driver-assisted migration,
so they have been fixed.

Also improved docstrings and fixed the workflow drivers follow when
implementing 2-phase migration, so it is consistent with the Tempest
tests and correctly handles availability zones (AZs), which were
previously locked to the source share's AZ.

Driver-assisted migration now creates an additional share instance
(the destination instance) to better support the driver migration
methods.
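
For drivers, the reworked flow operates on a source and a destination share
instance and is split across an explicit start/continue/complete cycle (see
the driver interface docstrings further down). A minimal sketch of a driver
implementing the new hooks; the class name, export path and backend behavior
here are hypothetical and not part of this change:

# Hypothetical driver sketch: a real driver subclasses
# manila.share.driver.ShareDriver and drives its own storage backend.
class ExampleMigrationDriver(object):

    def migration_start(self, context, source_share, destination_share,
                        share_server=None, destination_share_server=None):
        # Phase 1 begins: kick off the copy in the backend and return;
        # progress is then polled through migration_continue().
        pass

    def migration_continue(self, context, source_share, destination_share,
                           share_server=None, destination_share_server=None):
        # Called periodically; return True once the first (non-disruptive)
        # phase has finished in the backend.
        return True

    def migration_complete(self, context, source_share, destination_share,
                           share_server=None, destination_share_server=None):
        # Phase 2 (disruptive): cut over, clean up the source, and return
        # the export locations of the destination instance.
        return [{'path': '192.0.2.10:/migrated_share',
                 'is_admin_only': False}]

    def migration_cancel(self, context, source_share, destination_share,
                         share_server=None, destination_share_server=None):
        # Abort the in-progress copy in the backend, if supported.
        pass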

Updated the allow_access and deny_access APIs so users can mount
migrating shares before issuing 'migration-complete'.
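
The share states in which access rules may now be changed are, roughly,
'available' or a migrating share whose first phase has finished (or failed).
A paraphrase of the new validation, with literal strings standing in for the
constants in manila.common.constants:

# Paraphrase of the new API-side check; the literal strings below stand in
# for constants.STATUS_* and constants.TASK_STATE_* values.
MIGRATION_ACCESS_TASK_STATES = (
    'data_copying_error',
    'migration_error',
    'migration_driver_phase1_done',
    'data_copying_completed',
)


def access_rules_changeable(share):
    # Allowed when the share is available, or when it is migrating and the
    # first migration phase has completed (or errored out).
    if share['status'] == 'available':
        return True
    return (share['status'] in ('migrating', 'migrating_to') and
            share['task_state'] in MIGRATION_ACCESS_TASK_STATES)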

APIImpact

Closes-bug: #1594922
Change-Id: If4bfaf7e9d963b83c13a6fea241c2eda14f7f409
Rodrigo Barbieri 2016-08-24 22:01:31 -03:00
parent 0ef0da0137
commit c7fe51e79b
24 changed files with 2074 additions and 721 deletions

View File

@@ -96,11 +96,9 @@ class ViewBuilder(common.ViewBuilder):
         return {'share': share_dict}

     def migration_get_progress(self, progress):
-        result = {
-            'total_progress': progress['total_progress'],
-            'current_file_path': progress['current_file_path'],
-            'current_file_progress': progress['current_file_progress']
-        }
+        result = {'total_progress': progress['total_progress']}
         return result

     @common.ViewBuilder.versioned_method("2.2")
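
With this change the progress view only reports the overall percentage; the
per-file fields are dropped. Illustrative payloads (values made up):

# Illustrative only: shape of the rendered progress body before and after.
old_body = {
    'total_progress': 75,
    'current_file_path': '/tmp/file_being_copied',
    'current_file_progress': 30,
}
new_body = {'total_progress': 75}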

View File

@@ -73,6 +73,8 @@ class DataManager(manager.Manager):
                  'dest_instance_id': dest_share_instance_id})

         share_ref = self.db.share_get(context, share_id)
+        share_instance_ref = self.db.share_instance_get(
+            context, share_instance_id, with_share_data=True)

         share_rpcapi = share_rpc.ShareAPI()
@@ -90,7 +92,7 @@ class DataManager(manager.Manager):
                 migration_info_dest)
         except exception.ShareDataCopyCancelled:
             share_rpcapi.migration_complete(
-                context, share_ref, share_instance_id, dest_share_instance_id)
+                context, share_instance_ref, dest_share_instance_id)
             return
         except Exception:
             self.db.share_update(
@@ -101,7 +103,7 @@ class DataManager(manager.Manager):
                    'dest': dest_share_instance_id}
             LOG.exception(msg)
             share_rpcapi.migration_complete(
-                context, share_ref, share_instance_id, dest_share_instance_id)
+                context, share_instance_ref, dest_share_instance_id)
             raise exception.ShareDataCopyFailed(reason=msg)
         finally:
             self.busy_tasks_shares.pop(share_id, None)
@@ -121,7 +123,7 @@ class DataManager(manager.Manager):
                  'dest_instance_id': dest_share_instance_id})

         share_rpcapi.migration_complete(
-            context, share_ref, share_instance_id, dest_share_instance_id)
+            context, share_instance_ref, dest_share_instance_id)

     def data_copy_cancel(self, context, share_id):
         LOG.info(_LI("Received request to cancel share migration "

View File

@@ -242,6 +242,10 @@ class InvalidShareServer(Invalid):
     message = _("Share server %(share_server_id)s is not valid.")


+class ShareMigrationError(ManilaException):
+    message = _("Error in share migration: %(reason)s")
+
+
 class ShareMigrationFailed(ManilaException):
     message = _("Share migration failed: %(reason)s")

@@ -267,6 +271,11 @@ class ShareServerNotCreated(ManilaException):
     message = _("Share server %(share_server_id)s failed on creation.")


+class ShareServerNotReady(ManilaException):
+    message = _("Share server %(share_server_id)s failed to reach '%(state)s' "
+                "within %(time)s seconds.")
+
+
 class ServiceNotFound(NotFound):
     message = _("Service %(service_id)s could not be found.")

View File

@@ -159,9 +159,6 @@ class SchedulerManager(manager.Manager):
                                     request_spec,
                                     filter_properties)
-            except exception.NoValidHost as ex:
-                with excutils.save_and_reraise_exception():
-                    _migrate_share_set_error(self, context, ex, request_spec)
             except Exception as ex:
                 with excutils.save_and_reraise_exception():
                     _migrate_share_set_error(self, context, ex, request_spec)
@@ -169,7 +166,8 @@ class SchedulerManager(manager.Manager):
             share_ref = db.share_get(context, share_id)
             try:
                 share_rpcapi.ShareAPI().migration_start(
-                    context, share_ref, tgt_host, force_host_copy, notify)
+                    context, share_ref, tgt_host.host, force_host_copy,
+                    notify)
             except Exception as ex:
                 with excutils.save_and_reraise_exception():
                     _migrate_share_set_error(self, context, ex, request_spec)

View File

@@ -271,7 +271,7 @@ class API(base.Base):
         policy.check_policy(context, 'share', 'create')

         request_spec, share_instance = (
-            self._create_share_instance_and_get_request_spec(
+            self.create_share_instance_and_get_request_spec(
                 context, share, availability_zone=availability_zone,
                 consistency_group=consistency_group, host=host,
                 share_network_id=share_network_id))
@@ -307,7 +307,7 @@ class API(base.Base):

         return share_instance

-    def _create_share_instance_and_get_request_spec(
+    def create_share_instance_and_get_request_spec(
             self, context, share, availability_zone=None,
             consistency_group=None, host=None, share_network_id=None):
@@ -393,7 +393,7 @@ class API(base.Base):
             raise exception.ReplicationException(reason=msg % share['id'])

         request_spec, share_replica = (
-            self._create_share_instance_and_get_request_spec(
+            self.create_share_instance_and_get_request_spec(
                 context, share, availability_zone=availability_zone,
                 share_network_id=share_network_id))
@@ -874,7 +874,7 @@ class API(base.Base):

         return snapshot

-    def migration_start(self, context, share, host, force_host_copy,
+    def migration_start(self, context, share, dest_host, force_host_copy,
                         notify=True):
         """Migrates share to a new host."""

@@ -899,10 +899,10 @@ class API(base.Base):
         self._check_is_share_busy(share)

         # Make sure the destination host is different than the current one
-        if host == share_instance['host']:
+        if dest_host == share_instance['host']:
             msg = _('Destination host %(dest_host)s must be different '
                     'than the current host %(src_host)s.') % {
-                'dest_host': host,
+                'dest_host': dest_host,
                 'src_host': share_instance['host']}
             raise exception.InvalidHost(reason=msg)
@@ -912,8 +912,23 @@ class API(base.Base):
             msg = _("Share %s must not have snapshots.") % share['id']
             raise exception.InvalidShare(reason=msg)

+        dest_host_host = share_utils.extract_host(dest_host)
+
         # Make sure the host is in the list of available hosts
-        utils.validate_service_host(context, share_utils.extract_host(host))
+        utils.validate_service_host(context, dest_host_host)
+
+        service = self.db.service_get_by_args(
+            context, dest_host_host, 'manila-share')
+
+        share_type = {}
+        share_type_id = share['share_type_id']
+        if share_type_id:
+            share_type = share_types.get_share_type(context, share_type_id)
+
+        request_spec = self._get_request_spec_dict(
+            share,
+            share_type,
+            availability_zone_id=service['availability_zone_id'])

         # NOTE(ganso): there is the possibility of an error between here and
         # manager code, which will cause the share to be stuck in
@@ -925,21 +940,14 @@ class API(base.Base):
             context, share,
             {'task_state': constants.TASK_STATE_MIGRATION_STARTING})

-        share_type = {}
-        share_type_id = share['share_type_id']
-        if share_type_id:
-            share_type = share_types.get_share_type(context, share_type_id)
-
-        request_spec = self._get_request_spec_dict(share, share_type)
-
         try:
-            self.scheduler_rpcapi.migrate_share_to_host(context, share['id'],
-                                                         host, force_host_copy,
-                                                         notify, request_spec)
+            self.scheduler_rpcapi.migrate_share_to_host(
+                context, share['id'], dest_host, force_host_copy, notify,
+                request_spec)
         except Exception:
             msg = _('Destination host %(dest_host)s did not pass validation '
                     'for migration of share %(share)s.') % {
-                'dest_host': host,
+                'dest_host': dest_host,
                 'share': share['id']}
             raise exception.InvalidHost(reason=msg)
@@ -948,64 +956,150 @@ class API(base.Base):
         if share['task_state'] not in (
                 constants.TASK_STATE_DATA_COPYING_COMPLETED,
                 constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE):
-            msg = _("First migration phase of share %s not completed"
-                    " yet.") % share['id']
+            msg = self._migration_validate_error_message(share)
+            if msg is None:
+                msg = _("First migration phase of share %s not completed"
+                        " yet.") % share['id']
             LOG.error(msg)
             raise exception.InvalidShare(reason=msg)

+        share_instance_id, new_share_instance_id = (
+            self.get_migrating_instances(share))
+
+        share_instance_ref = self.db.share_instance_get(
+            context, share_instance_id, with_share_data=True)
+
+        self.share_rpcapi.migration_complete(context, share_instance_ref,
+                                             new_share_instance_id)
+
+    def get_migrating_instances(self, share):
+
         share_instance_id = None
         new_share_instance_id = None

-        if share['task_state'] == (
-                constants.TASK_STATE_DATA_COPYING_COMPLETED):
-
-            for instance in share.instances:
-                if instance['status'] == constants.STATUS_MIGRATING:
-                    share_instance_id = instance['id']
-                if instance['status'] == constants.STATUS_MIGRATING_TO:
-                    new_share_instance_id = instance['id']
-
-            if None in (share_instance_id, new_share_instance_id):
-                msg = _("Share instances %(instance_id)s and "
-                        "%(new_instance_id)s in inconsistent states, cannot"
-                        " continue share migration for share %(share_id)s"
-                        ".") % {'instance_id': share_instance_id,
-                                'new_instance_id': new_share_instance_id,
-                                'share_id': share['id']}
-                raise exception.ShareMigrationFailed(reason=msg)
+        for instance in share.instances:
+            if instance['status'] == constants.STATUS_MIGRATING:
+                share_instance_id = instance['id']
+            if instance['status'] == constants.STATUS_MIGRATING_TO:
+                new_share_instance_id = instance['id']

-        share_rpc = share_rpcapi.ShareAPI()
-        share_rpc.migration_complete(context, share, share_instance_id,
-                                     new_share_instance_id)
+        if None in (share_instance_id, new_share_instance_id):
+            msg = _("Share instances %(instance_id)s and "
+                    "%(new_instance_id)s in inconsistent states, cannot"
+                    " continue share migration for share %(share_id)s"
+                    ".") % {'instance_id': share_instance_id,
+                            'new_instance_id': new_share_instance_id,
+                            'share_id': share['id']}
+            raise exception.ShareMigrationFailed(reason=msg)
+
+        return share_instance_id, new_share_instance_id
     def migration_get_progress(self, context, share):

         if share['task_state'] == (
                 constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
-            share_rpc = share_rpcapi.ShareAPI()
-            return share_rpc.migration_get_progress(context, share)
+
+            share_instance_id, migrating_instance_id = (
+                self.get_migrating_instances(share))
+
+            share_instance_ref = self.db.share_instance_get(
+                context, share_instance_id, with_share_data=True)
+
+            service_host = share_utils.extract_host(share_instance_ref['host'])
+
+            service = self.db.service_get_by_args(
+                context, service_host, 'manila-share')
+
+            if utils.service_is_up(service):
+                try:
+                    result = self.share_rpcapi.migration_get_progress(
+                        context, share_instance_ref, migrating_instance_id)
+                except Exception:
+                    msg = _("Failed to obtain migration progress of share "
+                            "%s.") % share['id']
+                    LOG.exception(msg)
+                    raise exception.ShareMigrationError(reason=msg)
+            else:
+                result = None

         elif share['task_state'] == (
                 constants.TASK_STATE_DATA_COPYING_IN_PROGRESS):
             data_rpc = data_rpcapi.DataAPI()
             LOG.info(_LI("Sending request to get share migration information"
                          " of share %s.") % share['id'])
-            return data_rpc.data_copy_get_progress(context, share['id'])
+
+            services = self.db.service_get_all_by_topic(context, 'manila-data')
+
+            if len(services) > 0 and utils.service_is_up(services[0]):
+                try:
+                    result = data_rpc.data_copy_get_progress(
+                        context, share['id'])
+                except Exception:
+                    msg = _("Failed to obtain migration progress of share "
+                            "%s.") % share['id']
+                    LOG.exception(msg)
+                    raise exception.ShareMigrationError(reason=msg)
+            else:
+                result = None

         else:
-            msg = _("Migration of share %s data copy progress cannot be "
-                    "obtained at this moment.") % share['id']
+            result = None
+
+        if not (result and result.get('total_progress') is not None):
+            msg = self._migration_validate_error_message(share)
+            if msg is None:
+                msg = _("Migration progress of share %s cannot be obtained at "
+                        "this moment.") % share['id']
             LOG.error(msg)
             raise exception.InvalidShare(reason=msg)

+        return result
+
+    def _migration_validate_error_message(self, share):
+        task_state = share['task_state']
+        if task_state == constants.TASK_STATE_MIGRATION_SUCCESS:
+            msg = _("Migration of share %s has already "
+                    "completed.") % share['id']
+        elif task_state in (None, constants.TASK_STATE_MIGRATION_ERROR):
+            msg = _("There is no migration being performed for share %s "
+                    "at this moment.") % share['id']
+        elif task_state == constants.TASK_STATE_MIGRATION_CANCELLED:
+            msg = _("Migration of share %s was already "
+                    "cancelled.") % share['id']
+        elif task_state in (constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
+                            constants.TASK_STATE_DATA_COPYING_COMPLETED):
+            msg = _("Migration of share %s has already completed first "
+                    "phase.") % share['id']
+        else:
+            return None
+        return msg
     def migration_cancel(self, context, share):

-        if share['task_state'] == (
+        migrating = True
+        if share['task_state'] in (
+                constants.TASK_STATE_DATA_COPYING_COMPLETED,
+                constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
                 constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
-            share_rpc = share_rpcapi.ShareAPI()
-            share_rpc.migration_cancel(context, share)
+
+            share_instance_id, migrating_instance_id = (
+                self.get_migrating_instances(share))
+
+            share_instance_ref = self.db.share_instance_get(
+                context, share_instance_id, with_share_data=True)
+
+            service_host = share_utils.extract_host(share_instance_ref['host'])
+
+            service = self.db.service_get_by_args(
+                context, service_host, 'manila-share')
+
+            if utils.service_is_up(service):
+                self.share_rpcapi.migration_cancel(
+                    context, share_instance_ref, migrating_instance_id)
+            else:
+                migrating = False

         elif share['task_state'] == (
                 constants.TASK_STATE_DATA_COPYING_IN_PROGRESS):
@@ -1013,11 +1107,28 @@ class API(base.Base):
             data_rpc = data_rpcapi.DataAPI()
             LOG.info(_LI("Sending request to cancel migration of "
                          "share %s.") % share['id'])
-            data_rpc.data_copy_cancel(context, share['id'])
+
+            services = self.db.service_get_all_by_topic(context, 'manila-data')
+
+            if len(services) > 0 and utils.service_is_up(services[0]):
+                try:
+                    data_rpc.data_copy_cancel(context, share['id'])
+                except Exception:
+                    msg = _("Failed to cancel migration of share "
+                            "%s.") % share['id']
+                    LOG.exception(msg)
+                    raise exception.ShareMigrationError(reason=msg)
+            else:
+                migrating = False

         else:
-            msg = _("Data copy for migration of share %s cannot be cancelled"
-                    " at this moment.") % share['id']
+            migrating = False
+
+        if not migrating:
+            msg = self._migration_validate_error_message(share)
+            if msg is None:
+                msg = _("Migration of share %s cannot be cancelled at this "
+                        "moment.") % share['id']
             LOG.error(msg)
             raise exception.InvalidShare(reason=msg)
@@ -1186,7 +1297,20 @@ class API(base.Base):
         policy.check_policy(ctx, 'share', 'allow_access')
         share = self.db.share_get(ctx, share['id'])
         if share['status'] != constants.STATUS_AVAILABLE:
-            msg = _("Share status must be %s") % constants.STATUS_AVAILABLE
+            if not (share['status'] in (constants.STATUS_MIGRATING,
+                                        constants.STATUS_MIGRATING_TO) and
+                    share['task_state'] in (
+                        constants.TASK_STATE_DATA_COPYING_ERROR,
+                        constants.TASK_STATE_MIGRATION_ERROR,
+                        constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
+                        constants.TASK_STATE_DATA_COPYING_COMPLETED)):
+                msg = _("Share status must be %(available)s, or %(migrating)s "
+                        "while first phase of migration is completed.") % {
+                    'available': constants.STATUS_AVAILABLE,
+                    'migrating': constants.STATUS_MIGRATING
+                }
+            else:
+                msg = _("Share status must be %s") % constants.STATUS_AVAILABLE
             raise exception.InvalidShare(reason=msg)
         values = {
             'share_id': share['id'],
@@ -1258,7 +1382,20 @@ class API(base.Base):
             msg = _("Share doesn't have any instances")
             raise exception.InvalidShare(reason=msg)
         if share['status'] != constants.STATUS_AVAILABLE:
-            msg = _("Share status must be %s") % constants.STATUS_AVAILABLE
+            if not (share['status'] in (constants.STATUS_MIGRATING,
+                                        constants.STATUS_MIGRATING_TO) and
+                    share['task_state'] in (
+                        constants.TASK_STATE_DATA_COPYING_ERROR,
+                        constants.TASK_STATE_MIGRATION_ERROR,
+                        constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
+                        constants.TASK_STATE_DATA_COPYING_COMPLETED)):
+                msg = _("Share status must be %(available)s, or %(migrating)s "
+                        "while first phase of migration is completed.") % {
+                    'available': constants.STATUS_AVAILABLE,
+                    'migrating': constants.STATUS_MIGRATING
+                }
+            else:
+                msg = _("Share status must be %s") % constants.STATUS_AVAILABLE
             raise exception.InvalidShare(reason=msg)
         for share_instance in share.instances:

View File

@ -316,86 +316,156 @@ class ShareDriver(object):
{'actual': self.driver_handles_share_servers, {'actual': self.driver_handles_share_servers,
'allowed': driver_handles_share_servers}) 'allowed': driver_handles_share_servers})
def migration_start(self, context, share_ref, share_server, host, def migration_check_compatibility(
dest_driver_migration_info, notify): self, context, source_share, destination_share,
"""Is called to perform 1st phase of driver migration of a given share. share_server=None, destination_share_server=None):
"""Checks destination compatibility for migration of a given share.
.. note::
Is called to test compatibility with destination backend.
Based on destination_driver_migration_info, driver should check if it
is compatible with destination backend so optimized migration can
proceed.
:param context: The 'context.RequestContext' object for the request.
:param source_share: Reference to the share to be migrated.
:param destination_share: Reference to the share model to be used by
migrated share.
:param share_server: Share server model or None.
:param destination_share_server: Destination Share server model or
None.
:return: A dictionary containing values indicating if destination
backend is compatible and if share can remain writable during
migration.
Example::
{
'compatible': True,
'writable': True,
}
"""
return {
'compatible': False,
'writable': False,
}
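
A concrete driver overriding this hook would typically compare the protocols
and backends of the two instances. A hypothetical override, where the
backend-matching helper is made up for illustration:

# Hypothetical driver-side override of the new compatibility hook.
class ExampleCompatibilityMixin(object):

    def migration_check_compatibility(
            self, context, source_share, destination_share,
            share_server=None, destination_share_server=None):
        same_proto = (source_share['share_proto'] ==
                      destination_share['share_proto'])
        # 'writable' tells the share manager whether the source can keep
        # accepting writes, or must be switched to read-only rules first.
        return {
            'compatible': same_proto and self._same_backend_family(
                source_share['host'], destination_share['host']),
            'writable': False,
        }

    def _same_backend_family(self, src_host, dest_host):
        # Illustrative: compare the backend part of 'host@backend#pool'.
        return (src_host.split('#')[0].split('@')[-1] ==
                dest_host.split('#')[0].split('@')[-1])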
def migration_start(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
"""Starts migration of a given share to another host.
.. note::
Is called in source share's backend to start migration.
Driver should implement this method if willing to perform migration Driver should implement this method if willing to perform migration
in an optimized way, useful for when driver understands destination in an optimized way, useful for when source share's backend driver
backend. is compatible with destination backend driver. This method should
start the migration procedure in the backend and end. Following steps
should be done in 'migration_continue'.
:param context: The 'context.RequestContext' object for the request. :param context: The 'context.RequestContext' object for the request.
:param share_ref: Reference to the share being migrated. :param source_share: Reference to the original share model.
:param destination_share: Reference to the share model to be used by
migrated share.
:param share_server: Share server model or None. :param share_server: Share server model or None.
:param host: Destination host and its capabilities. :param destination_share_server: Destination Share server model or
:param dest_driver_migration_info: Migration information provided by None.
destination host.
:param notify: whether the migration should complete or wait for
2nd phase call. Driver may throw exception when validating this
parameter, exception if does not support 1-phase or 2-phase approach.
:returns: Boolean value indicating if driver migration succeeded.
:returns: Dictionary containing a model update with relevant data to
be updated after migration, such as export locations.
"""
return None, None
def migration_complete(self, context, share_ref, share_server,
dest_driver_migration_info):
"""Is called to perform 2nd phase of driver migration of a given share.
If driver is implementing 2-phase migration, this method should
perform tasks related to the 2nd phase of migration, thus completing
it.
:param context: The 'context.RequestContext' object for the request.
:param share_ref: Reference to the share being migrated.
:param share_server: Share server model or None.
:param dest_driver_migration_info: Migration information provided by
destination host.
:returns: Dictionary containing a model update with relevant data to
be updated after migration, such as export locations.
"""
return None
def migration_cancel(self, context, share_ref, share_server,
dest_driver_migration_info):
"""Is called to cancel driver migration.
If possible, driver can implement a way to cancel an in-progress
migration.
:param context: The 'context.RequestContext' object for the request.
:param share_ref: Reference to the share being migrated.
:param share_server: Share server model or None.
:param dest_driver_migration_info: Migration information provided by
destination host.
""" """
raise NotImplementedError() raise NotImplementedError()
def migration_get_progress(self, context, share_ref, share_server, def migration_continue(
dest_driver_migration_info): self, context, source_share, destination_share,
"""Is called to get migration progress. share_server=None, destination_share_server=None):
"""Continues migration of a given share to another host.
.. note::
Is called in source share's backend to continue migration.
Driver should implement this method to continue monitor the migration
progress in storage and perform following steps until 1st phase is
completed.
:param context: The 'context.RequestContext' object for the request.
:param source_share: Reference to the original share model.
:param destination_share: Reference to the share model to be used by
migrated share.
:param share_server: Share server model or None.
:param destination_share_server: Destination Share server model or
None.
:return: Boolean value to indicate if 1st phase is finished.
"""
raise NotImplementedError()
def migration_complete(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
"""Completes migration of a given share to another host.
.. note::
Is called in source share's backend to complete migration.
If driver is implementing 2-phase migration, this method should
perform the disruptive tasks related to the 2nd phase of migration,
thus completing it. Driver should also delete all original share data
from source backend.
:param context: The 'context.RequestContext' object for the request.
:param source_share: Reference to the original share model.
:param destination_share: Reference to the share model to be used by
migrated share.
:param share_server: Share server model or None.
:param destination_share_server: Destination Share server model or
None.
:return: List of export locations to update the share with.
"""
raise NotImplementedError()
def migration_cancel(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
"""Cancels migration of a given share to another host.
.. note::
Is called in source share's backend to cancel migration.
If possible, driver can implement a way to cancel an in-progress
migration.
:param context: The 'context.RequestContext' object for the request.
:param source_share: Reference to the original share model.
:param destination_share: Reference to the share model to be used by
migrated share.
:param share_server: Share server model or None.
:param destination_share_server: Destination Share server model or
None.
"""
raise NotImplementedError()
def migration_get_progress(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
"""Obtains progress of migration of a given share to another host.
.. note::
Is called in source share's backend to obtain migration progress.
If possible, driver can implement a way to return migration progress If possible, driver can implement a way to return migration progress
information. information.
:param context: The 'context.RequestContext' object for the request. :param context: The 'context.RequestContext' object for the request.
:param share_ref: Reference to the share being migrated. :param source_share: Reference to the original share model.
:param destination_share: Reference to the share model to be used by
migrated share.
:param share_server: Share server model or None. :param share_server: Share server model or None.
:param dest_driver_migration_info: Migration information provided by :param destination_share_server: Destination Share server model or
destination host. None.
:return: A dictionary with 'total_progress' field containing the :return: A dictionary with at least 'total_progress' field containing
percentage value. the percentage value.
""" """
raise NotImplementedError() raise NotImplementedError()
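
A driver that can report progress returns at least the overall percentage
that the API layer exposes; a hypothetical override (the value is
illustrative):

# Hypothetical driver-side override of the progress hook.
class ExampleProgressMixin(object):

    def migration_get_progress(
            self, context, source_share, destination_share,
            share_server=None, destination_share_server=None):
        # 'total_progress' (a percentage) is the only key required by the
        # API layer; drivers may include additional keys.
        return {'total_progress': 50}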
def migration_get_driver_info(self, context, share, share_server): def migration_get_info(self, context, share, share_server=None):
"""Is called to provide necessary driver migration logic.
:param context: The 'context.RequestContext' object for the request.
:param share: Reference to the share being migrated.
:param share_server: Share server model or None.
:return: A dictionary with migration information.
"""
return None
def migration_get_info(self, context, share, share_server):
"""Is called to provide necessary generic migration logic. """Is called to provide necessary generic migration logic.
:param context: The 'context.RequestContext' object for the request. :param context: The 'context.RequestContext' object for the request.
@@ -411,7 +481,7 @@ class ShareDriver(object):
         return {'mount': mount_template,
                 'unmount': unmount_template}

-    def _get_mount_command(self, context, share_instance, share_server):
+    def _get_mount_command(self, context, share_instance, share_server=None):
         """Is called to delegate mounting share logic."""

         mount_template = self.configuration.safe_get('share_mount_template')
@@ -424,7 +494,7 @@ class ShareDriver(object):

         return mount_template % format_template

-    def _get_mount_export(self, share_instance, share_server):
+    def _get_mount_export(self, share_instance, share_server=None):
         # NOTE(ganso): If drivers want to override the export_location IP,
         # they can do so using this configuration. This method can also be
         # overridden if necessary.
@@ -434,7 +504,8 @@ class ShareDriver(object):
         path = share_instance['export_locations'][0]['path']
         return path

-    def _get_unmount_command(self, context, share_instance, share_server):
+    def _get_unmount_command(self, context, share_instance,
+                             share_server=None):
         return self.configuration.safe_get('share_unmount_template')

     def create_share(self, context, share, share_server=None):

View File

@@ -22,6 +22,7 @@
 import copy
 import datetime
 import functools
+import time

 from oslo_config import cfg
 from oslo_log import log
@@ -44,6 +45,7 @@ from manila.i18n import _LW
 from manila import manager
 from manila import quota
 from manila.share import access
+from manila.share import api
 import manila.share.configuration
 from manila.share import drivers_private_data
 from manila.share import migration
@@ -182,7 +184,7 @@ def add_hooks(f):
 class ShareManager(manager.SchedulerDependentManager):
     """Manages NAS storages."""

-    RPC_API_VERSION = '1.11'
+    RPC_API_VERSION = '1.12'

     def __init__(self, share_driver=None, service_name=None, *args, **kwargs):
         """Load the driver from args, or from flags."""
@@ -284,6 +286,14 @@ class ShareManager(manager.SchedulerDependentManager):
         LOG.debug("Re-exporting %s shares", len(share_instances))
         for share_instance in share_instances:
             share_ref = self.db.share_get(ctxt, share_instance['share_id'])
+
+            if (share_ref['task_state'] == (
+                    constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) and
+                    share_instance['status'] == constants.STATUS_MIGRATING):
+                rpcapi = share_rpcapi.ShareAPI()
+                rpcapi.migration_driver_recovery(ctxt, share_ref, self.host)
+                continue
+
             if share_ref.is_busy:
                 LOG.info(
                     _LI("Share instance %(id)s: skipping export, "
@@ -343,7 +353,8 @@ class ShareManager(manager.SchedulerDependentManager):

     def _provide_share_server_for_share(self, context, share_network_id,
                                         share_instance, snapshot=None,
-                                        consistency_group=None):
+                                        consistency_group=None,
+                                        create_on_backend=True):
         """Gets or creates share_server and updates share with its id.

         Active share_server can be deleted if there are no dependent shares
@@ -362,6 +373,9 @@ class ShareManager(manager.SchedulerDependentManager):
                          share_network_id from provided snapshot.
         :param share_instance: Share Instance model
         :param snapshot: Optional -- Snapshot model
+        :param create_on_backend: Boolean. If True, driver will be asked to
+                                  create the share server if no share server
+                                  is available.

         :returns: dict, dict -- first value is share_server, that
                   has been chosen for share schedule. Second value is
@ -461,20 +475,74 @@ class ShareManager(manager.SchedulerDependentManager):
{'share_server_id': compatible_share_server['id']}, {'share_server_id': compatible_share_server['id']},
with_share_data=True with_share_data=True
) )
if create_on_backend:
compatible_share_server = (
self._create_share_server_in_backend(
context, compatible_share_server))
if compatible_share_server['status'] == constants.STATUS_CREATING:
# Create share server on backend with data from db.
compatible_share_server = self._setup_server(
context, compatible_share_server)
LOG.info(_LI("Share server created successfully."))
else:
LOG.info(_LI("Used preexisting share server "
"'%(share_server_id)s'"),
{'share_server_id': compatible_share_server['id']})
return compatible_share_server, share_instance_ref return compatible_share_server, share_instance_ref
return _provide_share_server_for_share() return _provide_share_server_for_share()
def _create_share_server_in_backend(self, context, share_server):
if share_server['status'] == constants.STATUS_CREATING:
# Create share server on backend with data from db.
share_server = self._setup_server(context, share_server)
LOG.info(_LI("Share server created successfully."))
else:
LOG.info(_LI("Using preexisting share server: "
"'%(share_server_id)s'"),
{'share_server_id': share_server['id']})
return share_server
def create_share_server(self, context, share_server_id):
"""Invoked to create a share server in this backend.
This method is invoked to create the share server defined in the model
obtained by the supplied id.
:param context: The 'context.RequestContext' object for the request.
:param share_server_id: The id of the server to be created.
"""
share_server = self.db.share_server_get(context, share_server_id)
self._create_share_server_in_backend(context, share_server)
def provide_share_server(self, context, share_instance_id,
share_network_id, snapshot_id=None):
"""Invoked to provide a compatible share server.
This method is invoked to find a compatible share server among the
existing ones or create a share server database instance with the share
server properties that will be used to create the share server later.
:param context: The 'context.RequestContext' object for the request.
:param share_instance_id: The id of the share instance whose model
attributes will be used to provide the share server.
:param share_network_id: The id of the share network the share server
to be provided has to be related to.
:param snapshot_id: The id of the snapshot to be used to obtain the
share server if applicable.
:return: The id of the share server that is being provided.
"""
share_instance = self.db.share_instance_get(context, share_instance_id,
with_share_data=True)
snapshot_ref = None
if snapshot_id:
snapshot_ref = self.db.share_snapshot_get(context, snapshot_id)
consistency_group_ref = None
if share_instance.get('consistency_group_id'):
consistency_group_ref = self.db.consistency_group_get(
context, share_instance['consistency_group_id'])
share_server, share_instance = self._provide_share_server_for_share(
context, share_network_id, share_instance, snapshot_ref,
consistency_group_ref, create_on_backend=False)
return share_server['id']
def _provide_share_server_for_cg(self, context, share_network_id, def _provide_share_server_for_cg(self, context, share_network_id,
cg_ref, cgsnapshot=None): cg_ref, cgsnapshot=None):
"""Gets or creates share_server and updates share with its id. """Gets or creates share_server and updates share with its id.
@ -592,21 +660,187 @@ class ShareManager(manager.SchedulerDependentManager):
return self.driver.migration_get_info(context, share_instance, return self.driver.migration_get_info(context, share_instance,
share_server) share_server)
@utils.require_driver_initialized def _migration_start_driver(self, context, share_ref, src_share_instance,
def migration_get_driver_info(self, context, share_instance_id): dest_host, notify, new_az_id):
share_instance = self.db.share_instance_get(
context, share_instance_id, with_share_data=True)
share_server = None share_server = self._get_share_server(context, src_share_instance)
if share_instance.get('share_server_id'):
share_server = self.db.share_server_get(
context, share_instance['share_server_id'])
return self.driver.migration_get_driver_info(context, share_instance, share_api = api.API()
share_server)
request_spec, dest_share_instance = (
share_api.create_share_instance_and_get_request_spec(
context, share_ref, new_az_id, None, dest_host,
src_share_instance['share_network_id']))
self.db.share_instance_update(
context, dest_share_instance['id'],
{'status': constants.STATUS_MIGRATING_TO})
# refresh and obtain proxified properties
dest_share_instance = self.db.share_instance_get(
context, dest_share_instance['id'], with_share_data=True)
helper = migration.ShareMigrationHelper(context, self.db, share_ref)
try:
if dest_share_instance['share_network_id']:
rpcapi = share_rpcapi.ShareAPI()
# NOTE(ganso): Obtaining the share_server_id asynchronously so
# we can wait for it to be ready.
dest_share_server_id = rpcapi.provide_share_server(
context, dest_share_instance,
dest_share_instance['share_network_id'])
rpcapi.create_share_server(
context, dest_share_instance, dest_share_server_id)
dest_share_server = helper.wait_for_share_server(
dest_share_server_id)
else:
dest_share_server = None
compatibility = self.driver.migration_check_compatibility(
context, src_share_instance, dest_share_instance,
share_server, dest_share_server)
if not compatibility.get('compatible'):
msg = _("Destination host %(host)s is not compatible with "
"share %(share)s's source backend for driver-assisted "
"migration.") % {
'host': dest_host,
'share': share_ref['id'],
}
raise exception.ShareMigrationFailed(reason=msg)
if not compatibility.get('writable'):
readonly_support = self.driver.configuration.safe_get(
'migration_readonly_rules_support')
helper.change_to_read_only(src_share_instance, share_server,
readonly_support, self.driver)
LOG.debug("Initiating driver migration for share %s.",
share_ref['id'])
self.db.share_update(
context, share_ref['id'],
{'task_state': (
constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)})
self.driver.migration_start(
context, src_share_instance, dest_share_instance,
share_server, dest_share_server)
# prevent invoking _migration_driver_continue immediately
time.sleep(5)
self._migration_driver_continue(
context, share_ref, src_share_instance, dest_share_instance,
share_server, dest_share_server, notify)
except Exception:
# NOTE(ganso): Cleaning up error'ed destination share instance from
# database. It is assumed that driver cleans up leftovers in
# backend when migration fails.
self._migration_delete_instance(context, dest_share_instance['id'])
# NOTE(ganso): For now source share instance should remain in
# migrating status for fallback migration.
msg = _("Driver optimized migration of share %s "
"failed.") % share_ref['id']
LOG.exception(msg)
raise exception.ShareMigrationFailed(reason=msg)
return True
def _migration_driver_continue(
self, context, share_ref, src_share_instance, dest_share_instance,
src_share_server, dest_share_server, notify=False):
finished = False
share_ref = self.db.share_get(context, share_ref['id'])
while (not finished and share_ref['task_state'] ==
constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
finished = self.driver.migration_continue(
context, src_share_instance, dest_share_instance,
src_share_server, dest_share_server)
time.sleep(5)
share_ref = self.db.share_get(context, share_ref['id'])
if finished:
self.db.share_update(
context, share_ref['id'],
{'task_state':
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE})
if notify:
self._migration_complete_driver(
context, share_ref, src_share_instance,
dest_share_instance)
LOG.info(_LI("Share Migration for share %s"
" completed successfully."), share_ref['id'])
else:
LOG.info(_LI("Share Migration for share %s completed "
"first phase successfully."), share_ref['id'])
else:
if (share_ref['task_state'] ==
constants.TASK_STATE_MIGRATION_CANCELLED):
LOG.warning(_LW("Share Migration for share %s was cancelled."),
share_ref['id'])
else:
msg = (_("Share Migration for share %s did not complete "
"first phase successfully."), share_ref['id'])
raise exception.ShareMigrationFailed(reason=msg)
@utils.require_driver_initialized @utils.require_driver_initialized
def migration_start(self, context, share_id, host, force_host_copy, def migration_driver_recovery(self, context, share_id):
"""Resumes a migration after a service restart."""
share = self.db.share_get(context, share_id)
share_api = api.API()
src_share_instance_id, dest_share_instance_id = (
share_api.get_migrating_instances(share))
src_share_instance = self.db.share_instance_get(
context, src_share_instance_id, with_share_data=True)
dest_share_instance = self.db.share_instance_get(
context, dest_share_instance_id, with_share_data=True)
src_share_server = self._get_share_server(context, src_share_instance)
dest_share_server = self._get_share_server(
context, dest_share_instance)
try:
self._migration_driver_continue(
context, share, src_share_instance, dest_share_instance,
src_share_server, dest_share_server)
except Exception:
# NOTE(ganso): Cleaning up error'ed destination share instance from
# database. It is assumed that driver cleans up leftovers in
# backend when migration fails.
self._migration_delete_instance(context, dest_share_instance['id'])
self.db.share_instance_update(
context, src_share_instance['id'],
{'status': constants.STATUS_AVAILABLE})
self.db.share_update(
context, share['id'],
{'task_state': constants.TASK_STATE_MIGRATION_ERROR})
msg = _("Driver optimized migration of share %s "
"failed.") % share['id']
LOG.exception(msg)
raise exception.ShareMigrationFailed(reason=msg)
@utils.require_driver_initialized
def migration_start(self, context, share_id, dest_host, force_host_copy,
notify=True): notify=True):
"""Migrates a share from current host to another host.""" """Migrates a share from current host to another host."""
LOG.debug("Entered migration_start method for share %s.", share_id) LOG.debug("Entered migration_start method for share %s.", share_id)
@ -615,10 +849,14 @@ class ShareManager(manager.SchedulerDependentManager):
context, share_id, context, share_id,
{'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}) {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS})
rpcapi = share_rpcapi.ShareAPI()
share_ref = self.db.share_get(context, share_id) share_ref = self.db.share_get(context, share_id)
share_instance = self._get_share_instance(context, share_ref) share_instance = self._get_share_instance(context, share_ref)
moved = False success = False
host_value = share_utils.extract_host(dest_host)
service = self.db.service_get_by_args(
context, host_value, 'manila-share')
new_az_id = service['availability_zone_id']
self.db.share_instance_update(context, share_instance['id'], self.db.share_instance_update(context, share_instance['id'],
{'status': constants.STATUS_MIGRATING}) {'status': constants.STATUS_MIGRATING})
@ -626,49 +864,27 @@ class ShareManager(manager.SchedulerDependentManager):
if not force_host_copy: if not force_host_copy:
try: try:
dest_driver_migration_info = rpcapi.migration_get_driver_info( success = self._migration_start_driver(
context, share_instance) context, share_ref, share_instance, dest_host, notify,
new_az_id)
share_server = self._get_share_server(context.elevated(),
share_instance)
LOG.debug("Calling driver migration for share %s.", share_id)
self.db.share_update(
context, share_id,
{'task_state': (
constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)})
moved, model_update = self.driver.migration_start(
context, share_instance, share_server, host,
dest_driver_migration_info, notify)
# NOTE(ganso): Here we are allowing the driver to perform
# changes even if it has not performed migration. While this
# scenario may not be valid, I do not think it should be
# forcefully prevented.
if model_update:
self.db.share_instance_update(
context, share_instance['id'], model_update)
except Exception as e: except Exception as e:
msg = six.text_type(e) if not isinstance(e, NotImplementedError):
LOG.exception(msg) LOG.exception(
LOG.warning(_LW("Driver did not migrate share %s. Proceeding " _LE("The driver could not migrate the share %(shr)s"),
"with generic migration approach.") % share_id) {'shr': share_id})
if not moved: if not success:
LOG.debug("Starting generic migration " LOG.info(_LI("Starting generic migration for share %s."), share_id)
"for share %s.", share_id)
self.db.share_update( self.db.share_update(
context, share_id, context, share_id,
{'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}) {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS})
try: try:
self._migration_start_generic(context, share_ref, self._migration_start_generic(
share_instance, host, notify) context, share_ref, share_instance, dest_host, notify,
new_az_id)
except Exception: except Exception:
msg = _("Generic migration failed for share %s.") % share_id msg = _("Generic migration failed for share %s.") % share_id
LOG.exception(msg) LOG.exception(msg)
@ -679,52 +895,36 @@ class ShareManager(manager.SchedulerDependentManager):
context, share_instance['id'], context, share_instance['id'],
{'status': constants.STATUS_AVAILABLE}) {'status': constants.STATUS_AVAILABLE})
raise exception.ShareMigrationFailed(reason=msg) raise exception.ShareMigrationFailed(reason=msg)
elif not notify:
self.db.share_update(
context, share_ref['id'],
{'task_state':
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE})
else:
self.db.share_instance_update(
context, share_instance['id'],
{'status': constants.STATUS_AVAILABLE,
'host': host['host']})
self.db.share_update(
context, share_ref['id'],
{'task_state': constants.TASK_STATE_MIGRATION_SUCCESS})
LOG.info(_LI("Share Migration for share %s" def _migration_start_generic(self, context, share, src_share_instance,
" completed successfully."), share_ref['id']) dest_host, notify, new_az_id):
def _migration_start_generic(self, context, share, share_instance, host,
notify):
rpcapi = share_rpcapi.ShareAPI() rpcapi = share_rpcapi.ShareAPI()
helper = migration.ShareMigrationHelper(context, self.db, share) helper = migration.ShareMigrationHelper(context, self.db, share)
share_server = self._get_share_server(context.elevated(), share_server = self._get_share_server(context.elevated(),
share_instance) src_share_instance)
readonly_support = self.driver.configuration.safe_get( readonly_support = self.driver.configuration.safe_get(
'migration_readonly_rules_support') 'migration_readonly_rules_support')
helper.change_to_read_only(share_instance, share_server, helper.change_to_read_only(src_share_instance, share_server,
readonly_support, self.driver) readonly_support, self.driver)
try: try:
new_share_instance = helper.create_instance_and_wait( dest_share_instance = helper.create_instance_and_wait(
share, share_instance, host) share, src_share_instance, dest_host, new_az_id)
self.db.share_instance_update( self.db.share_instance_update(
context, new_share_instance['id'], context, dest_share_instance['id'],
{'status': constants.STATUS_MIGRATING_TO}) {'status': constants.STATUS_MIGRATING_TO})
except Exception: except Exception:
msg = _("Failed to create instance on destination " msg = _("Failed to create instance on destination "
"backend during migration of share %s.") % share['id'] "backend during migration of share %s.") % share['id']
LOG.exception(msg) LOG.exception(msg)
helper.cleanup_access_rules(share_instance, share_server, helper.cleanup_access_rules(src_share_instance, share_server,
self.driver) self.driver)
raise exception.ShareMigrationFailed(reason=msg) raise exception.ShareMigrationFailed(reason=msg)
@ -735,17 +935,17 @@ class ShareManager(manager.SchedulerDependentManager):
try: try:
src_migration_info = self.driver.migration_get_info( src_migration_info = self.driver.migration_get_info(
context, share_instance, share_server) context, src_share_instance, share_server)
dest_migration_info = rpcapi.migration_get_info( dest_migration_info = rpcapi.migration_get_info(
context, new_share_instance) context, dest_share_instance)
LOG.debug("Time to start copying in migration" LOG.debug("Time to start copying in migration"
" for share %s.", share['id']) " for share %s.", share['id'])
data_rpc.migration_start( data_rpc.migration_start(
context, share['id'], ignore_list, share_instance['id'], context, share['id'], ignore_list, src_share_instance['id'],
new_share_instance['id'], src_migration_info, dest_share_instance['id'], src_migration_info,
dest_migration_info, notify) dest_migration_info, notify)
except Exception: except Exception:
@ -753,77 +953,128 @@ class ShareManager(manager.SchedulerDependentManager):
" invoking Data Service for migration of " " invoking Data Service for migration of "
"share %s.") % share['id'] "share %s.") % share['id']
LOG.exception(msg) LOG.exception(msg)
helper.cleanup_new_instance(new_share_instance) helper.cleanup_new_instance(dest_share_instance)
helper.cleanup_access_rules(share_instance, share_server, helper.cleanup_access_rules(src_share_instance, share_server,
self.driver) self.driver)
raise exception.ShareMigrationFailed(reason=msg) raise exception.ShareMigrationFailed(reason=msg)
def _migration_complete_driver(
self, context, share_ref, src_share_instance, dest_share_instance):
share_server = self._get_share_server(context, src_share_instance)
dest_share_server = self._get_share_server(
context, dest_share_instance)
export_locations = self.driver.migration_complete(
context, src_share_instance, dest_share_instance,
share_server, dest_share_server)
if export_locations:
self.db.share_export_locations_update(
context, dest_share_instance['id'], export_locations)
helper = migration.ShareMigrationHelper(context, self.db, share_ref)
helper.apply_new_access_rules(dest_share_instance)
self.db.share_instance_update(
context, dest_share_instance['id'],
{'status': constants.STATUS_AVAILABLE})
self._migration_delete_instance(context, src_share_instance['id'])
self.db.share_update(
context, dest_share_instance['share_id'],
{'task_state': constants.TASK_STATE_MIGRATION_SUCCESS})
def _migration_delete_instance(self, context, instance_id):
share_instance = self.db.share_instance_get(
context, instance_id, with_share_data=True)
self.db.share_instance_update(
context, instance_id, {'status': constants.STATUS_INACTIVE})
rules = self.db.share_access_get_all_for_instance(
context, instance_id)
for rule in rules:
access_mapping = self.db.share_instance_access_get(
context, rule['id'], instance_id)
self.db.share_instance_access_delete(
context, access_mapping['id'])
self.db.share_instance_delete(context, instance_id)
LOG.info(_LI("Share instance %s: deleted successfully."),
instance_id)
self._check_delete_share_server(context, share_instance)
@utils.require_driver_initialized @utils.require_driver_initialized
def migration_complete(self, context, share_id, share_instance_id, def migration_complete(self, context, src_instance_id, dest_instance_id):
new_share_instance_id):
src_share_instance = self.db.share_instance_get(
context, src_instance_id, with_share_data=True)
dest_share_instance = self.db.share_instance_get(
context, dest_instance_id, with_share_data=True)
share_ref = self.db.share_get(context, src_share_instance['share_id'])
LOG.info(_LI("Received request to finish Share Migration for " LOG.info(_LI("Received request to finish Share Migration for "
"share %s."), share_id) "share %s."), share_ref['id'])
share_ref = self.db.share_get(context, share_id)
if share_ref['task_state'] == ( if share_ref['task_state'] == (
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE): constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE):
rpcapi = share_rpcapi.ShareAPI()
share_instance = self._get_share_instance(context, share_ref)
share_server = self._get_share_server(context, share_instance)
try: try:
dest_driver_migration_info = rpcapi.migration_get_driver_info( self._migration_complete_driver(
context, share_instance) context, share_ref, src_share_instance,
dest_share_instance)
model_update = self.driver.migration_complete(
context, share_instance, share_server,
dest_driver_migration_info)
if model_update:
self.db.share_instance_update(
context, share_instance['id'], model_update)
self.db.share_update(
context, share_id,
{'task_state': constants.TASK_STATE_MIGRATION_SUCCESS})
except Exception: except Exception:
msg = _("Driver migration completion failed for" msg = _("Driver migration completion failed for"
" share %s.") % share_id " share %s.") % share_ref['id']
LOG.exception(msg) LOG.exception(msg)
self.db.share_instance_update(
context, src_instance_id,
{'status': constants.STATUS_AVAILABLE})
self.db.share_instance_update(
context, dest_instance_id,
{'status': constants.STATUS_ERROR})
self.db.share_update( self.db.share_update(
context, share_id, context, share_ref['id'],
{'task_state': constants.TASK_STATE_MIGRATION_ERROR}) {'task_state': constants.TASK_STATE_MIGRATION_ERROR})
raise exception.ShareMigrationFailed(reason=msg) raise exception.ShareMigrationFailed(reason=msg)
else: else:
try: try:
self._migration_complete( self._migration_complete_generic(
context, share_ref, share_instance_id, context, share_ref, src_instance_id,
new_share_instance_id) dest_instance_id)
except Exception: except Exception:
msg = _("Generic migration completion failed for" msg = _("Generic migration completion failed for"
" share %s.") % share_id " share %s.") % share_ref['id']
LOG.exception(msg) LOG.exception(msg)
self.db.share_update( self.db.share_update(
context, share_id, context, share_ref['id'],
{'task_state': constants.TASK_STATE_MIGRATION_ERROR}) {'task_state': constants.TASK_STATE_MIGRATION_ERROR})
self.db.share_instance_update( self.db.share_instance_update(
context, share_instance_id, context, src_instance_id,
{'status': constants.STATUS_AVAILABLE}) {'status': constants.STATUS_AVAILABLE})
raise exception.ShareMigrationFailed(reason=msg) raise exception.ShareMigrationFailed(reason=msg)
def _migration_complete(self, context, share_ref, share_instance_id, LOG.info(_LI("Share Migration for share %s"
new_share_instance_id): " completed successfully."), share_ref['id'])
share_instance = self.db.share_instance_get( def _migration_complete_generic(self, context, share_ref,
context, share_instance_id, with_share_data=True) src_instance_id, dest_instance_id):
new_share_instance = self.db.share_instance_get(
context, new_share_instance_id, with_share_data=True)
share_server = self._get_share_server(context, share_instance) src_share_instance = self.db.share_instance_get(
context, src_instance_id, with_share_data=True)
dest_share_instance = self.db.share_instance_get(
context, dest_instance_id, with_share_data=True)
share_server = self._get_share_server(context, src_share_instance)
helper = migration.ShareMigrationHelper(context, self.db, share_ref) helper = migration.ShareMigrationHelper(context, self.db, share_ref)
@ -833,13 +1084,13 @@ class ShareManager(manager.SchedulerDependentManager):
msg = _("Data copy of generic migration for share %s has not " msg = _("Data copy of generic migration for share %s has not "
"completed successfully.") % share_ref['id'] "completed successfully.") % share_ref['id']
LOG.warning(msg) LOG.warning(msg)
helper.cleanup_new_instance(new_share_instance) helper.cleanup_new_instance(dest_share_instance)
helper.cleanup_access_rules(share_instance, share_server, helper.cleanup_access_rules(src_share_instance, share_server,
self.driver) self.driver)
if task_state == constants.TASK_STATE_DATA_COPYING_CANCELLED: if task_state == constants.TASK_STATE_DATA_COPYING_CANCELLED:
self.db.share_instance_update( self.db.share_instance_update(
context, share_instance_id, context, src_instance_id,
{'status': constants.STATUS_AVAILABLE}) {'status': constants.STATUS_AVAILABLE})
self.db.share_update( self.db.share_update(
context, share_ref['id'], context, share_ref['id'],
@ -858,13 +1109,13 @@ class ShareManager(manager.SchedulerDependentManager):
raise exception.ShareMigrationFailed(reason=msg) raise exception.ShareMigrationFailed(reason=msg)
try: try:
helper.apply_new_access_rules(new_share_instance) helper.apply_new_access_rules(dest_share_instance)
except Exception: except Exception:
msg = _("Failed to apply new access rules during migration " msg = _("Failed to apply new access rules during migration "
"of share %s.") % share_ref['id'] "of share %s.") % share_ref['id']
LOG.exception(msg) LOG.exception(msg)
helper.cleanup_new_instance(new_share_instance) helper.cleanup_new_instance(dest_share_instance)
helper.cleanup_access_rules(share_instance, share_server, helper.cleanup_access_rules(src_share_instance, share_server,
self.driver) self.driver)
raise exception.ShareMigrationFailed(reason=msg) raise exception.ShareMigrationFailed(reason=msg)
@ -872,75 +1123,107 @@ class ShareManager(manager.SchedulerDependentManager):
context, share_ref['id'], context, share_ref['id'],
{'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}) {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING})
self.db.share_instance_update(context, new_share_instance_id, self.db.share_instance_update(context, dest_instance_id,
{'status': constants.STATUS_AVAILABLE}) {'status': constants.STATUS_AVAILABLE})
self.db.share_instance_update(context, share_instance_id, self.db.share_instance_update(context, src_instance_id,
{'status': constants.STATUS_INACTIVE}) {'status': constants.STATUS_INACTIVE})
helper.delete_instance_and_wait(share_instance) helper.delete_instance_and_wait(src_share_instance)
self._check_delete_share_server(context, src_share_instance)
self.db.share_update( self.db.share_update(
context, share_ref['id'], context, share_ref['id'],
{'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}) {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS})
LOG.info(_LI("Share Migration for share %s"
" completed successfully."), share_ref['id'])
@utils.require_driver_initialized @utils.require_driver_initialized
def migration_cancel(self, context, share_id): def migration_cancel(self, context, src_instance_id, dest_instance_id):
share_ref = self.db.share_get(context, share_id) src_share_instance = self.db.share_instance_get(
context, src_instance_id, with_share_data=True)
dest_share_instance = self.db.share_instance_get(
context, dest_instance_id, with_share_data=True)
# Confirm that it is driver migration scenario share_ref = self.db.share_get(context, src_share_instance['share_id'])
if share_ref['task_state'] == (
if share_ref['task_state'] not in (
constants.TASK_STATE_DATA_COPYING_COMPLETED,
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
msg = _("Migration of share %s cannot be cancelled at this "
"moment.") % share_ref['id']
raise exception.InvalidShare(reason=msg)
share_server = None share_server = self._get_share_server(context, src_share_instance)
if share_ref.instance.get('share_server_id'):
share_server = self.db.share_server_get(
context, share_ref.instance['share_server_id'])
share_rpc = share_rpcapi.ShareAPI() dest_share_server = self._get_share_server(
context, dest_share_instance)
driver_migration_info = share_rpc.migration_get_driver_info( if share_ref['task_state'] == (
context, share_ref.instance) constants.TASK_STATE_DATA_COPYING_COMPLETED):
helper = migration.ShareMigrationHelper(
context, self.db, share_ref)
self.db.share_instance_update(
context, dest_share_instance['id'],
{'status': constants.STATUS_INACTIVE})
helper.cleanup_new_instance(dest_share_instance)
helper.cleanup_access_rules(src_share_instance, share_server,
self.driver)
else:
self.driver.migration_cancel( self.driver.migration_cancel(
context, share_ref.instance, share_server, context, src_share_instance, dest_share_instance,
driver_migration_info) share_server, dest_share_server)
else:
msg = _("Driver is not performing migration for" self._migration_delete_instance(context, dest_share_instance['id'])
" share %s") % share_id
raise exception.InvalidShare(reason=msg) self.db.share_update(
context, share_ref['id'],
{'task_state': constants.TASK_STATE_MIGRATION_CANCELLED})
self.db.share_instance_update(
context, src_share_instance['id'],
{'status': constants.STATUS_AVAILABLE})
LOG.info(_LI("Share Migration for share %s"
" was cancelled."), share_ref['id'])
@utils.require_driver_initialized @utils.require_driver_initialized
def migration_get_progress(self, context, share_id): def migration_get_progress(self, context, src_instance_id,
dest_instance_id):
share_ref = self.db.share_get(context, share_id) src_share_instance = self.db.share_instance_get(
context, src_instance_id, with_share_data=True)
dest_share_instance = self.db.share_instance_get(
context, dest_instance_id, with_share_data=True)
share_ref = self.db.share_get(context, src_share_instance['share_id'])
# Confirm that it is driver migration scenario # Confirm that it is driver migration scenario
if share_ref['task_state'] == ( if share_ref['task_state'] != (
constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
share_server = None
if share_ref.instance.get('share_server_id'):
share_server = self.db.share_server_get(
context, share_ref.instance['share_server_id'])
share_rpc = share_rpcapi.ShareAPI()
driver_migration_info = share_rpc.migration_get_driver_info(
context, share_ref.instance)
return self.driver.migration_get_progress(
context, share_ref.instance, share_server,
driver_migration_info)
else:
msg = _("Driver is not performing migration for" msg = _("Driver is not performing migration for"
" share %s") % share_id " share %s at this moment.") % share_ref['id']
raise exception.InvalidShare(reason=msg) raise exception.InvalidShare(reason=msg)
share_server = None
if share_ref.instance.get('share_server_id'):
share_server = self.db.share_server_get(
context, src_share_instance['share_server_id'])
dest_share_server = None
if dest_share_instance.get('share_server_id'):
dest_share_server = self.db.share_server_get(
context, dest_share_instance['share_server_id'])
return self.driver.migration_get_progress(
context, src_share_instance, dest_share_instance,
share_server, dest_share_server)
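For reference, the API view in this change only exposes a 'total_progress' key, so a driver-side implementation of this hook can stay minimal. A hedged sketch follows; the parameter names are assumed for illustration and are not taken verbatim from this commit:
    def migration_get_progress(self, context, source_share, destination_share,
                               share_server=None,
                               destination_share_server=None):
        # Hypothetical driver sketch: report how far the backend copy has gone.
        # Only 'total_progress' is required by the updated API view.
        return {'total_progress': 75}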
def _get_share_instance(self, context, share): def _get_share_instance(self, context, share):
if isinstance(share, six.string_types): if isinstance(share, six.string_types):
id = share id = share
@ -1879,6 +2162,10 @@ class ShareManager(manager.SchedulerDependentManager):
LOG.info(_LI("Share instance %s: deleted successfully."), LOG.info(_LI("Share instance %s: deleted successfully."),
share_instance_id) share_instance_id)
self._check_delete_share_server(context, share_instance)
def _check_delete_share_server(self, context, share_instance):
if CONF.delete_share_server_with_last_share: if CONF.delete_share_server_with_last_share:
share_server = self._get_share_server(context, share_instance) share_server = self._get_share_server(context, share_instance)
if share_server and len(share_server.share_instances) == 0: if share_server and len(share_server.share_instances) == 0:
@ -84,11 +84,12 @@ class ShareMigrationHelper(object):
else: else:
time.sleep(tries ** 2) time.sleep(tries ** 2)
def create_instance_and_wait(self, share, share_instance, host): def create_instance_and_wait(
self, share, share_instance, dest_host, new_az_id):
new_share_instance = self.api.create_instance( new_share_instance = self.api.create_instance(
self.context, share, share_instance['share_network_id'], self.context, share, share_instance['share_network_id'],
host['host']) dest_host, new_az_id)
# Wait for new_share_instance to become ready # Wait for new_share_instance to become ready
starttime = time.time() starttime = time.time()
@ -103,14 +104,14 @@ class ShareMigrationHelper(object):
msg = _("Failed to create new share instance" msg = _("Failed to create new share instance"
" (from %(share_id)s) on " " (from %(share_id)s) on "
"destination host %(host_name)s") % { "destination host %(host_name)s") % {
'share_id': share['id'], 'host_name': host['host']} 'share_id': share['id'], 'host_name': dest_host}
self.cleanup_new_instance(new_share_instance) self.cleanup_new_instance(new_share_instance)
raise exception.ShareMigrationFailed(reason=msg) raise exception.ShareMigrationFailed(reason=msg)
elif now > deadline: elif now > deadline:
msg = _("Timeout creating new share instance " msg = _("Timeout creating new share instance "
"(from %(share_id)s) on " "(from %(share_id)s) on "
"destination host %(host_name)s") % { "destination host %(host_name)s") % {
'share_id': share['id'], 'host_name': host['host']} 'share_id': share['id'], 'host_name': dest_host}
self.cleanup_new_instance(new_share_instance) self.cleanup_new_instance(new_share_instance)
raise exception.ShareMigrationFailed(reason=msg) raise exception.ShareMigrationFailed(reason=msg)
else: else:
@ -199,3 +200,16 @@ class ShareMigrationHelper(object):
utils.wait_for_access_update( utils.wait_for_access_update(
self.context, self.db, new_share_instance, self.context, self.db, new_share_instance,
self.migration_wait_access_rules_timeout) self.migration_wait_access_rules_timeout)
@utils.retry(exception.ShareServerNotReady, retries=8)
def wait_for_share_server(self, share_server_id):
share_server = self.db.share_server_get(self.context, share_server_id)
if share_server['status'] == constants.STATUS_ERROR:
raise exception.ShareServerNotCreated(
share_server_id=share_server_id)
elif share_server['status'] == constants.STATUS_ACTIVE:
return share_server
else:
raise exception.ShareServerNotReady(
share_server_id=share_server_id, time=511,
state=constants.STATUS_ACTIVE)
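A minimal usage sketch for the new helper, assuming an existing ShareMigrationHelper instance named 'helper' and an illustrative server id; callers block until the destination share server is usable, with the retry decorator backing off between polls:
# Illustrative only: 'helper' is an assumed ShareMigrationHelper instance.
share_server = helper.wait_for_share_server('fake_server_id')
assert share_server['status'] == constants.STATUS_ACTIVE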
@ -59,6 +59,10 @@ class ShareAPI(object):
migration_get_driver_info() migration_get_driver_info()
1.11 - Add create_replicated_snapshot() and 1.11 - Add create_replicated_snapshot() and
delete_replicated_snapshot() methods delete_replicated_snapshot() methods
1.12 - Add provide_share_server(), create_share_server() and
migration_driver_recovery(), remove migration_get_driver_info(),
update migration_cancel(), migration_complete() and
migration_get_progress() method signatures
""" """
BASE_RPC_API_VERSION = '1.0' BASE_RPC_API_VERSION = '1.0'
@ -67,7 +71,7 @@ class ShareAPI(object):
super(ShareAPI, self).__init__() super(ShareAPI, self).__init__()
target = messaging.Target(topic=CONF.share_topic, target = messaging.Target(topic=CONF.share_topic,
version=self.BASE_RPC_API_VERSION) version=self.BASE_RPC_API_VERSION)
self.client = rpc.get_client(target, version_cap='1.11') self.client = rpc.get_client(target, version_cap='1.12')
def create_share_instance(self, context, share_instance, host, def create_share_instance(self, context, share_instance, host,
request_spec, filter_properties, request_spec, filter_properties,
@ -123,15 +127,19 @@ class ShareAPI(object):
notify): notify):
new_host = utils.extract_host(share['instance']['host']) new_host = utils.extract_host(share['instance']['host'])
call_context = self.client.prepare(server=new_host, version='1.6') call_context = self.client.prepare(server=new_host, version='1.6')
host_p = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
call_context.cast(context, call_context.cast(context,
'migration_start', 'migration_start',
share_id=share['id'], share_id=share['id'],
host=host_p, dest_host=dest_host,
force_host_copy=force_host_copy, force_host_copy=force_host_copy,
notify=notify) notify=notify)
def migration_driver_recovery(self, context, share, host):
call_context = self.client.prepare(server=host, version='1.12')
call_context.cast(context,
'migration_driver_recovery',
share_id=share['id'])
def migration_get_info(self, context, share_instance): def migration_get_info(self, context, share_instance):
new_host = utils.extract_host(share_instance['host']) new_host = utils.extract_host(share_instance['host'])
call_context = self.client.prepare(server=new_host, version='1.6') call_context = self.client.prepare(server=new_host, version='1.6')
@ -139,13 +147,6 @@ class ShareAPI(object):
'migration_get_info', 'migration_get_info',
share_instance_id=share_instance['id']) share_instance_id=share_instance['id'])
def migration_get_driver_info(self, context, share_instance):
new_host = utils.extract_host(share_instance['host'])
call_context = self.client.prepare(server=new_host, version='1.6')
return call_context.call(context,
'migration_get_driver_info',
share_instance_id=share_instance['id'])
def delete_share_server(self, context, share_server): def delete_share_server(self, context, share_server):
host = utils.extract_host(share_server['host']) host = utils.extract_host(share_server['host'])
call_context = self.client.prepare(server=host, version='1.0') call_context = self.client.prepare(server=host, version='1.0')
@ -296,24 +297,45 @@ class ShareAPI(object):
share_replica_id=share_replica['id'], share_replica_id=share_replica['id'],
share_id=share_replica['share_id']) share_id=share_replica['share_id'])
def migration_complete(self, context, share, share_instance_id, def migration_complete(self, context, src_share_instance,
new_share_instance_id): dest_instance_id):
new_host = utils.extract_host(share['host']) new_host = utils.extract_host(src_share_instance['host'])
call_context = self.client.prepare(server=new_host, version='1.10') call_context = self.client.prepare(server=new_host, version='1.12')
call_context.cast(context, call_context.cast(context,
'migration_complete', 'migration_complete',
share_id=share['id'], src_instance_id=src_share_instance['id'],
share_instance_id=share_instance_id, dest_instance_id=dest_instance_id)
new_share_instance_id=new_share_instance_id)
def migration_cancel(self, context, share): def migration_cancel(self, context, src_share_instance, dest_instance_id):
new_host = utils.extract_host(share['host']) new_host = utils.extract_host(src_share_instance['host'])
call_context = self.client.prepare(server=new_host, version='1.10') call_context = self.client.prepare(server=new_host, version='1.12')
call_context.call(context, 'migration_cancel', share_id=share['id']) call_context.cast(context,
'migration_cancel',
src_instance_id=src_share_instance['id'],
dest_instance_id=dest_instance_id)
def migration_get_progress(self, context, share): def migration_get_progress(self, context, src_share_instance,
new_host = utils.extract_host(share['host']) dest_instance_id):
call_context = self.client.prepare(server=new_host, version='1.10') new_host = utils.extract_host(src_share_instance['host'])
call_context = self.client.prepare(server=new_host, version='1.12')
return call_context.call(context, return call_context.call(context,
'migration_get_progress', 'migration_get_progress',
share_id=share['id']) src_instance_id=src_share_instance['id'],
dest_instance_id=dest_instance_id)
def provide_share_server(self, context, share_instance, share_network_id,
snapshot_id=None):
new_host = utils.extract_host(share_instance['host'])
call_context = self.client.prepare(server=new_host, version='1.12')
return call_context.call(context,
'provide_share_server',
share_instance_id=share_instance['id'],
share_network_id=share_network_id,
snapshot_id=snapshot_id)
def create_share_server(self, context, share_instance, share_server_id):
new_host = utils.extract_host(share_instance['host'])
call_context = self.client.prepare(server=new_host, version='1.12')
call_context.cast(context,
'create_share_server',
share_server_id=share_server_id)
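A hedged sketch of how a caller would use the reworked casts above, now that they are addressed by share instance rather than by share; 'ctxt', 'src_instance' and 'dest_instance' are assumed to exist in the caller:
# Illustrative only: mirrors the signatures defined above.
rpcapi = ShareAPI()
rpcapi.migration_complete(ctxt, src_instance, dest_instance['id'])
rpcapi.migration_cancel(ctxt, src_instance, dest_instance['id'])
progress = rpcapi.migration_get_progress(ctxt, src_instance, dest_instance['id'])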
@ -599,10 +599,7 @@ class ShareAPITest(test.TestCase):
req.api_version_request.experimental = True req.api_version_request.experimental = True
body = {'migration_get_progress': None} body = {'migration_get_progress': None}
expected = {'total_progress': 'fake', expected = {'total_progress': 'fake'}
'current_file_progress': 'fake',
'current_file_path': 'fake',
}
self.mock_object(share_api.API, 'get', self.mock_object(share_api.API, 'get',
mock.Mock(return_value=share)) mock.Mock(return_value=share))
@ -82,6 +82,8 @@ class DataManagerTestCase(test.TestCase):
# mocks # mocks
self.mock_object(db, 'share_get', mock.Mock(return_value=self.share)) self.mock_object(db, 'share_get', mock.Mock(return_value=self.share))
self.mock_object(db, 'share_instance_get', mock.Mock(
return_value=self.share.instance))
self.mock_object(data_utils, 'Copy', self.mock_object(data_utils, 'Copy',
mock.Mock(return_value='fake_copy')) mock.Mock(return_value='fake_copy'))
@ -122,7 +124,7 @@ class DataManagerTestCase(test.TestCase):
if notify or exc: if notify or exc:
share_rpc.ShareAPI.migration_complete.assert_called_once_with( share_rpc.ShareAPI.migration_complete.assert_called_once_with(
self.context, self.share, 'ins1_id', 'ins2_id') self.context, self.share.instance, 'ins2_id')
@ddt.data({'cancelled': False, 'exc': None}, @ddt.data({'cancelled': False, 'exc': None},
{'cancelled': False, 'exc': Exception('fake')}, {'cancelled': False, 'exc': Exception('fake')},
@ -218,8 +218,11 @@ class SchedulerManagerTestCase(test.TestCase):
def test_migrate_share_to_host(self): def test_migrate_share_to_host(self):
class fake_host(object):
host = 'fake@backend#pool'
share = db_utils.create_share() share = db_utils.create_share()
host = 'fake@backend#pool' host = fake_host()
self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.mock_object(db, 'share_get', mock.Mock(return_value=share))
self.mock_object(share_rpcapi.ShareAPI, 'migration_start') self.mock_object(share_rpcapi.ShareAPI, 'migration_start')
@ -227,8 +230,8 @@ class SchedulerManagerTestCase(test.TestCase):
'host_passes_filters', 'host_passes_filters',
mock.Mock(return_value=host)) mock.Mock(return_value=host))
self.manager.migrate_share_to_host(self.context, share['id'], host, self.manager.migrate_share_to_host(self.context, share['id'],
False, True, {}, None) host.host, False, True, {}, None)
def test_migrate_share_to_host_no_valid_host(self): def test_migrate_share_to_host_no_valid_host(self):
@ -33,7 +33,6 @@ from manila import policy
from manila import quota from manila import quota
from manila import share from manila import share
from manila.share import api as share_api from manila.share import api as share_api
from manila.share import rpcapi as share_rpc
from manila.share import share_types from manila.share import share_types
from manila import test from manila import test
from manila.tests import db_utils from manila.tests import db_utils
@ -754,7 +753,7 @@ class ShareAPITestCase(test.TestCase):
mock_db_share_instance_update = self.mock_object( mock_db_share_instance_update = self.mock_object(
db_api, 'share_instance_update') db_api, 'share_instance_update')
self.mock_object( self.mock_object(
share_api.API, '_create_share_instance_and_get_request_spec', share_api.API, 'create_share_instance_and_get_request_spec',
mock.Mock(return_value=(fake_req_spec, fake_instance))) mock.Mock(return_value=(fake_req_spec, fake_instance)))
retval = self.api.create_instance(self.context, fake_share, retval = self.api.create_instance(self.context, fake_share,
@ -2014,7 +2013,7 @@ class ShareAPITestCase(test.TestCase):
def test_migration_start(self): def test_migration_start(self):
host = 'fake2@backend#pool' host = 'fake2@backend#pool'
fake_service = {'availability_zone_id': 'fake_az_id'}
fake_type = { fake_type = {
'id': 'fake_type_id', 'id': 'fake_type_id',
'extra_specs': { 'extra_specs': {
@ -2027,17 +2026,21 @@ class ShareAPITestCase(test.TestCase):
host='fake@backend#pool', share_type_id=fake_type['id']) host='fake@backend#pool', share_type_id=fake_type['id'])
request_spec = self._get_request_spec_dict( request_spec = self._get_request_spec_dict(
share, fake_type, size=0) share, fake_type, size=0, availability_zone_id='fake_az_id')
self.mock_object(self.scheduler_rpcapi, 'migrate_share_to_host') self.mock_object(self.scheduler_rpcapi, 'migrate_share_to_host')
self.mock_object(share_types, 'get_share_type', self.mock_object(share_types, 'get_share_type',
mock.Mock(return_value=fake_type)) mock.Mock(return_value=fake_type))
self.mock_object(utils, 'validate_service_host') self.mock_object(utils, 'validate_service_host')
self.mock_object(db_api, 'service_get_by_args',
mock.Mock(return_value=fake_service))
self.api.migration_start(self.context, share, host, True, True) self.api.migration_start(self.context, share, host, True, True)
self.scheduler_rpcapi.migrate_share_to_host.assert_called_once_with( self.scheduler_rpcapi.migrate_share_to_host.assert_called_once_with(
self.context, share['id'], host, True, True, request_spec) self.context, share['id'], host, True, True, request_spec)
db_api.service_get_by_args.assert_called_once_with(
self.context, 'fake2@backend', 'manila-share')
def test_migration_start_status_unavailable(self): def test_migration_start_status_unavailable(self):
host = 'fake2@backend#pool' host = 'fake2@backend#pool'
@ -2111,6 +2114,7 @@ class ShareAPITestCase(test.TestCase):
def test_migration_start_exception(self): def test_migration_start_exception(self):
host = 'fake2@backend#pool' host = 'fake2@backend#pool'
fake_service = {'availability_zone_id': 'fake_az_id'}
fake_type = { fake_type = {
'id': 'fake_type_id', 'id': 'fake_type_id',
'extra_specs': { 'extra_specs': {
@ -2128,6 +2132,8 @@ class ShareAPITestCase(test.TestCase):
self.mock_object(utils, 'validate_service_host') self.mock_object(utils, 'validate_service_host')
self.mock_object(db_api, 'share_snapshot_get_all_for_share', self.mock_object(db_api, 'share_snapshot_get_all_for_share',
mock.Mock(return_value=False)) mock.Mock(return_value=False))
self.mock_object(db_api, 'service_get_by_args',
mock.Mock(return_value=fake_service))
self.mock_object(db_api, 'share_update', mock.Mock(return_value=True)) self.mock_object(db_api, 'share_update', mock.Mock(return_value=True))
self.mock_object(self.scheduler_rpcapi, 'migrate_share_to_host', self.mock_object(self.scheduler_rpcapi, 'migrate_share_to_host',
mock.Mock(side_effect=exception.ShareMigrationFailed( mock.Mock(side_effect=exception.ShareMigrationFailed(
@ -2139,12 +2145,14 @@ class ShareAPITestCase(test.TestCase):
db_api.share_update.assert_any_call( db_api.share_update.assert_any_call(
mock.ANY, share['id'], mock.ANY) mock.ANY, share['id'], mock.ANY)
db_api.service_get_by_args.assert_called_once_with(
self.context, 'fake2@backend', 'manila-share')
@ddt.data({}, {'replication_type': None}) @ddt.data({}, {'replication_type': None})
def test_create_share_replica_invalid_share_type(self, attributes): def test_create_share_replica_invalid_share_type(self, attributes):
share = fakes.fake_share(id='FAKE_SHARE_ID', **attributes) share = fakes.fake_share(id='FAKE_SHARE_ID', **attributes)
mock_request_spec_call = self.mock_object( mock_request_spec_call = self.mock_object(
self.api, '_create_share_instance_and_get_request_spec') self.api, 'create_share_instance_and_get_request_spec')
mock_db_update_call = self.mock_object(db_api, 'share_replica_update') mock_db_update_call = self.mock_object(db_api, 'share_replica_update')
mock_scheduler_rpcapi_call = self.mock_object( mock_scheduler_rpcapi_call = self.mock_object(
self.api.scheduler_rpcapi, 'create_share_replica') self.api.scheduler_rpcapi, 'create_share_replica')
@ -2163,7 +2171,7 @@ class ShareAPITestCase(test.TestCase):
is_busy=True, is_busy=True,
replication_type='dr') replication_type='dr')
mock_request_spec_call = self.mock_object( mock_request_spec_call = self.mock_object(
self.api, '_create_share_instance_and_get_request_spec') self.api, 'create_share_instance_and_get_request_spec')
mock_db_update_call = self.mock_object(db_api, 'share_replica_update') mock_db_update_call = self.mock_object(db_api, 'share_replica_update')
mock_scheduler_rpcapi_call = self.mock_object( mock_scheduler_rpcapi_call = self.mock_object(
self.api.scheduler_rpcapi, 'create_share_replica') self.api.scheduler_rpcapi, 'create_share_replica')
@ -2180,7 +2188,7 @@ class ShareAPITestCase(test.TestCase):
share = fakes.fake_share( share = fakes.fake_share(
id='FAKE_SHARE_ID', replication_type='dr') id='FAKE_SHARE_ID', replication_type='dr')
mock_request_spec_call = self.mock_object( mock_request_spec_call = self.mock_object(
self.api, '_create_share_instance_and_get_request_spec') self.api, 'create_share_instance_and_get_request_spec')
mock_db_update_call = self.mock_object(db_api, 'share_replica_update') mock_db_update_call = self.mock_object(db_api, 'share_replica_update')
mock_scheduler_rpcapi_call = self.mock_object( mock_scheduler_rpcapi_call = self.mock_object(
self.api.scheduler_rpcapi, 'create_share_replica') self.api.scheduler_rpcapi, 'create_share_replica')
@ -2209,7 +2217,7 @@ class ShareAPITestCase(test.TestCase):
self.mock_object(db_api, 'share_replicas_get_available_active_replica', self.mock_object(db_api, 'share_replicas_get_available_active_replica',
mock.Mock(return_value={'host': 'fake_ar_host'})) mock.Mock(return_value={'host': 'fake_ar_host'}))
self.mock_object( self.mock_object(
share_api.API, '_create_share_instance_and_get_request_spec', share_api.API, 'create_share_instance_and_get_request_spec',
mock.Mock(return_value=(fake_request_spec, fake_replica))) mock.Mock(return_value=(fake_request_spec, fake_replica)))
self.mock_object(db_api, 'share_replica_update') self.mock_object(db_api, 'share_replica_update')
mock_sched_rpcapi_call = self.mock_object( mock_sched_rpcapi_call = self.mock_object(
@ -2390,18 +2398,26 @@ class ShareAPITestCase(test.TestCase):
task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED, task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED,
instances=[instance1, instance2]) instances=[instance1, instance2])
self.mock_object(share_rpc.ShareAPI, 'migration_complete') self.mock_object(db_api, 'share_instance_get',
mock.Mock(return_value=instance1))
self.mock_object(self.api.share_rpcapi, 'migration_complete')
self.api.migration_complete(self.context, share) self.api.migration_complete(self.context, share)
share_rpc.ShareAPI.migration_complete.assert_called_once_with( self.api.share_rpcapi.migration_complete.assert_called_once_with(
self.context, share, instance1['id'], instance2['id']) self.context, instance1, instance2['id'])
def test_migration_complete_task_state_invalid(self): @ddt.data(constants.TASK_STATE_DATA_COPYING_STARTING,
constants.TASK_STATE_MIGRATION_SUCCESS,
constants.TASK_STATE_DATA_COPYING_IN_PROGRESS,
constants.TASK_STATE_MIGRATION_ERROR,
constants.TASK_STATE_MIGRATION_CANCELLED,
None)
def test_migration_complete_task_state_invalid(self, task_state):
share = db_utils.create_share( share = db_utils.create_share(
id='fake_id', id='fake_id',
task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) task_state=task_state)
self.assertRaises(exception.InvalidShare, self.api.migration_complete, self.assertRaises(exception.InvalidShare, self.api.migration_complete,
self.context, share) self.context, share)
@ -2421,86 +2437,301 @@ class ShareAPITestCase(test.TestCase):
self.api.migration_complete, self.context, self.api.migration_complete, self.context,
share) share)
def test_migration_cancel(self): @ddt.data(None, Exception('fake'))
def test_migration_cancel(self, exc):
share = db_utils.create_share( share = db_utils.create_share(
id='fake_id', id='fake_id',
task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS)
services = ['fake_service']
self.mock_object(data_rpc.DataAPI, 'data_copy_cancel') self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
self.mock_object(db_api, 'service_get_all_by_topic',
mock.Mock(return_value=services))
self.mock_object(data_rpc.DataAPI, 'data_copy_cancel',
mock.Mock(side_effect=[exc]))
self.api.migration_cancel(self.context, share) if exc:
self.assertRaises(
exception.ShareMigrationError, self.api.migration_cancel,
self.context, share)
else:
self.api.migration_cancel(self.context, share)
data_rpc.DataAPI.data_copy_cancel.assert_called_once_with( data_rpc.DataAPI.data_copy_cancel.assert_called_once_with(
self.context, share['id']) self.context, share['id'])
db_api.service_get_all_by_topic.assert_called_once_with(
self.context, 'manila-data')
@ddt.unpack
def test_migration_cancel_service_down(self):
service = 'fake_service'
instance1 = db_utils.create_share_instance(
share_id='fake_id', status=constants.STATUS_MIGRATING)
instance2 = db_utils.create_share_instance(
share_id='fake_id', status=constants.STATUS_MIGRATING_TO)
share = db_utils.create_share(
id='fake_id',
task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS,
instances=[instance1, instance2])
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=False))
self.mock_object(db_api, 'share_instance_get',
mock.Mock(return_value=instance1))
self.mock_object(db_api, 'service_get_all_by_topic',
mock.Mock(return_value=service))
self.assertRaises(exception.InvalidShare,
self.api.migration_cancel, self.context, share)
def test_migration_cancel_driver(self): def test_migration_cancel_driver(self):
service = 'fake_service'
instance1 = db_utils.create_share_instance(
share_id='fake_id',
status=constants.STATUS_MIGRATING,
host='some_host')
instance2 = db_utils.create_share_instance(
share_id='fake_id',
status=constants.STATUS_MIGRATING_TO)
share = db_utils.create_share( share = db_utils.create_share(
id='fake_id', id='fake_id',
task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
instances=[instance1, instance2])
self.mock_object(share_rpc.ShareAPI, 'migration_cancel') self.mock_object(db_api, 'share_instance_get',
mock.Mock(return_value=instance1))
self.mock_object(self.api.share_rpcapi, 'migration_cancel')
self.mock_object(db_api, 'service_get_by_args',
mock.Mock(return_value=service))
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
self.api.migration_cancel(self.context, share) self.api.migration_cancel(self.context, share)
share_rpc.ShareAPI.migration_cancel.assert_called_once_with( self.api.share_rpcapi.migration_cancel.assert_called_once_with(
self.context, share) self.context, instance1, instance2['id'])
db_api.service_get_by_args.assert_called_once_with(
self.context, instance1['host'], 'manila-share')
def test_migration_cancel_task_state_invalid(self): @ddt.unpack
def test_migration_cancel_driver_service_down(self):
service = 'fake_service'
instance1 = db_utils.create_share_instance(
share_id='fake_id',
status=constants.STATUS_MIGRATING,
host='some_host')
instance2 = db_utils.create_share_instance(
share_id='fake_id',
status=constants.STATUS_MIGRATING_TO)
share = db_utils.create_share(
id='fake_id',
task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
instances=[instance1, instance2])
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=False))
self.mock_object(db_api, 'share_instance_get',
mock.Mock(return_value=instance1))
self.mock_object(db_api, 'service_get_by_args',
mock.Mock(return_value=service))
self.assertRaises(exception.InvalidShare,
self.api.migration_cancel, self.context, share)
@ddt.data(constants.TASK_STATE_DATA_COPYING_STARTING,
constants.TASK_STATE_MIGRATION_SUCCESS,
constants.TASK_STATE_MIGRATION_ERROR,
constants.TASK_STATE_MIGRATION_CANCELLED,
None)
def test_migration_cancel_task_state_invalid(self, task_state):
share = db_utils.create_share( share = db_utils.create_share(
id='fake_id', id='fake_id',
task_state=constants.TASK_STATE_DATA_COPYING_STARTING) task_state=task_state)
self.assertRaises(exception.InvalidShare, self.api.migration_cancel, self.assertRaises(exception.InvalidShare, self.api.migration_cancel,
self.context, share) self.context, share)
def test_migration_get_progress(self): @ddt.data({'total_progress': 0}, Exception('fake'))
def test_migration_get_progress(self, expected):
share = db_utils.create_share( share = db_utils.create_share(
id='fake_id', id='fake_id',
task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS)
services = ['fake_service']
expected = 'fake_progress' self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
self.mock_object(db_api, 'service_get_all_by_topic',
mock.Mock(return_value=services))
self.mock_object(data_rpc.DataAPI, 'data_copy_get_progress', self.mock_object(data_rpc.DataAPI, 'data_copy_get_progress',
mock.Mock(return_value=expected)) mock.Mock(side_effect=[expected]))
result = self.api.migration_get_progress(self.context, share) if not isinstance(expected, Exception):
result = self.api.migration_get_progress(self.context, share)
self.assertEqual(expected, result) self.assertEqual(expected, result)
else:
self.assertRaises(
exception.ShareMigrationError, self.api.migration_get_progress,
self.context, share)
data_rpc.DataAPI.data_copy_get_progress.assert_called_once_with( data_rpc.DataAPI.data_copy_get_progress.assert_called_once_with(
self.context, share['id']) self.context, share['id'])
db_api.service_get_all_by_topic.assert_called_once_with(
self.context, 'manila-data')
@ddt.unpack
def test_migration_get_progress_service_down(self):
instance1 = db_utils.create_share_instance(
share_id='fake_id', status=constants.STATUS_MIGRATING)
instance2 = db_utils.create_share_instance(
share_id='fake_id', status=constants.STATUS_MIGRATING_TO)
share = db_utils.create_share(
id='fake_id',
task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS,
instances=[instance1, instance2])
services = ['fake_service']
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=False))
self.mock_object(db_api, 'service_get_all_by_topic',
mock.Mock(return_value=services))
self.mock_object(db_api, 'share_instance_get',
mock.Mock(return_value=instance1))
self.assertRaises(exception.InvalidShare,
self.api.migration_get_progress, self.context, share)
def test_migration_get_progress_driver(self): def test_migration_get_progress_driver(self):
expected = {'total_progress': 0}
instance1 = db_utils.create_share_instance(
share_id='fake_id',
status=constants.STATUS_MIGRATING,
host='some_host')
instance2 = db_utils.create_share_instance(
share_id='fake_id',
status=constants.STATUS_MIGRATING_TO)
share = db_utils.create_share( share = db_utils.create_share(
id='fake_id', id='fake_id',
task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
instances=[instance1, instance2])
service = 'fake_service'
expected = 'fake_progress' self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
self.mock_object(db_api, 'service_get_by_args',
self.mock_object(share_rpc.ShareAPI, 'migration_get_progress', mock.Mock(return_value=service))
self.mock_object(db_api, 'share_instance_get',
mock.Mock(return_value=instance1))
self.mock_object(self.api.share_rpcapi, 'migration_get_progress',
mock.Mock(return_value=expected)) mock.Mock(return_value=expected))
result = self.api.migration_get_progress(self.context, share) result = self.api.migration_get_progress(self.context, share)
self.assertEqual(expected, result) self.assertEqual(expected, result)
share_rpc.ShareAPI.migration_get_progress.assert_called_once_with( self.api.share_rpcapi.migration_get_progress.assert_called_once_with(
self.context, share) self.context, instance1, instance2['id'])
db_api.service_get_by_args.assert_called_once_with(
self.context, instance1['host'], 'manila-share')
def test_migration_get_progress_task_state_invalid(self): def test_migration_get_progress_driver_error(self):
instance1 = db_utils.create_share_instance(
share_id='fake_id',
status=constants.STATUS_MIGRATING,
host='some_host')
instance2 = db_utils.create_share_instance(
share_id='fake_id',
status=constants.STATUS_MIGRATING_TO)
share = db_utils.create_share( share = db_utils.create_share(
id='fake_id', id='fake_id',
task_state=constants.TASK_STATE_DATA_COPYING_STARTING) task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
instances=[instance1, instance2])
service = 'fake_service'
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
self.mock_object(db_api, 'service_get_by_args',
mock.Mock(return_value=service))
self.mock_object(db_api, 'share_instance_get',
mock.Mock(return_value=instance1))
self.mock_object(self.api.share_rpcapi, 'migration_get_progress',
mock.Mock(side_effect=Exception('fake')))
self.assertRaises(exception.ShareMigrationError,
self.api.migration_get_progress, self.context, share)
self.api.share_rpcapi.migration_get_progress.assert_called_once_with(
self.context, instance1, instance2['id'])
def test_migration_get_progress_driver_service_down(self):
service = 'fake_service'
instance1 = db_utils.create_share_instance(
share_id='fake_id',
status=constants.STATUS_MIGRATING,
host='some_host')
instance2 = db_utils.create_share_instance(
share_id='fake_id',
status=constants.STATUS_MIGRATING_TO)
share = db_utils.create_share(
id='fake_id',
task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
instances=[instance1, instance2])
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=False))
self.mock_object(db_api, 'share_instance_get',
mock.Mock(return_value=instance1))
self.mock_object(db_api, 'service_get_by_args',
mock.Mock(return_value=service))
self.assertRaises(exception.InvalidShare, self.assertRaises(exception.InvalidShare,
self.api.migration_get_progress, self.context, share) self.api.migration_get_progress, self.context, share)
@ddt.data(constants.TASK_STATE_DATA_COPYING_STARTING,
constants.TASK_STATE_MIGRATION_SUCCESS,
constants.TASK_STATE_MIGRATION_ERROR,
constants.TASK_STATE_MIGRATION_CANCELLED,
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
constants.TASK_STATE_DATA_COPYING_COMPLETED,
None)
def test_migration_get_progress_task_state_invalid(self, task_state):
share = db_utils.create_share(
id='fake_id',
task_state=task_state)
self.assertRaises(exception.InvalidShare,
self.api.migration_get_progress, self.context, share)
@ddt.data(None, {'invalid_progress': None}, {})
def test_migration_get_progress_invalid(self, progress):
instance1 = db_utils.create_share_instance(
share_id='fake_id',
status=constants.STATUS_MIGRATING,
host='some_host')
instance2 = db_utils.create_share_instance(
share_id='fake_id',
status=constants.STATUS_MIGRATING_TO)
share = db_utils.create_share(
id='fake_id',
task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
instances=[instance1, instance2])
service = 'fake_service'
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
self.mock_object(db_api, 'service_get_by_args',
mock.Mock(return_value=service))
self.mock_object(db_api, 'share_instance_get',
mock.Mock(return_value=instance1))
self.mock_object(self.api.share_rpcapi, 'migration_get_progress',
mock.Mock(return_value=progress))
self.assertRaises(exception.InvalidShare,
self.api.migration_get_progress, self.context, share)
self.api.share_rpcapi.migration_get_progress.assert_called_once_with(
self.context, instance1, instance2['id'])
class OtherTenantsShareActionsTestCase(test.TestCase): class OtherTenantsShareActionsTestCase(test.TestCase):
def setUp(self): def setUp(self):
@ -466,16 +466,24 @@ class ShareDriverTestCase(test.TestCase):
driver.CONF.set_default('driver_handles_share_servers', False) driver.CONF.set_default('driver_handles_share_servers', False)
share_driver = driver.ShareDriver(False) share_driver = driver.ShareDriver(False)
self.assertEqual((None, None), self.assertRaises(NotImplementedError, share_driver.migration_start,
share_driver.migration_start(None, None, None, None, None, None, None, None)
None, None, None))
def test_migration_continue(self):
driver.CONF.set_default('driver_handles_share_servers', False)
share_driver = driver.ShareDriver(False)
self.assertRaises(NotImplementedError, share_driver.migration_continue,
None, None, None, None, None,)
def test_migration_complete(self): def test_migration_complete(self):
driver.CONF.set_default('driver_handles_share_servers', False) driver.CONF.set_default('driver_handles_share_servers', False)
share_driver = driver.ShareDriver(False) share_driver = driver.ShareDriver(False)
share_driver.migration_complete(None, None, None, None) self.assertRaises(NotImplementedError, share_driver.migration_complete,
None, None, None, None, None)
def test_migration_cancel(self): def test_migration_cancel(self):
@ -483,7 +491,7 @@ class ShareDriverTestCase(test.TestCase):
share_driver = driver.ShareDriver(False) share_driver = driver.ShareDriver(False)
self.assertRaises(NotImplementedError, share_driver.migration_cancel, self.assertRaises(NotImplementedError, share_driver.migration_cancel,
None, None, None, None) None, None, None, None, None)
def test_migration_get_progress(self): def test_migration_get_progress(self):
@ -492,15 +500,7 @@ class ShareDriverTestCase(test.TestCase):
self.assertRaises(NotImplementedError, self.assertRaises(NotImplementedError,
share_driver.migration_get_progress, share_driver.migration_get_progress,
None, None, None, None) None, None, None, None, None)
def test_migration_get_driver_info_default(self):
driver.CONF.set_default('driver_handles_share_servers', False)
share_driver = driver.ShareDriver(False)
self.assertIsNone(
share_driver.migration_get_driver_info(None, None, None), None)
@ddt.data(True, False) @ddt.data(True, False)
def test_migration_get_info(self, admin): def test_migration_get_info(self, admin):
@ -521,6 +521,21 @@ class ShareDriverTestCase(test.TestCase):
self.assertEqual(expected, migration_info) self.assertEqual(expected, migration_info)
def test_migration_check_compatibility(self):
driver.CONF.set_default('driver_handles_share_servers', False)
share_driver = driver.ShareDriver(False)
share_driver.configuration = configuration.Configuration(None)
expected = {
'compatible': False,
'writable': False,
}
result = share_driver.migration_check_compatibility(
None, None, None, None, None)
self.assertEqual(expected, result)
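The default checked above reports an incompatible, non-writable migration. A driver that supports driver-assisted migration would be expected to override it, roughly along these lines; parameter names are assumed for illustration:
    def migration_check_compatibility(self, context, source_share,
                                      destination_share, share_server=None,
                                      destination_share_server=None):
        # Hypothetical driver override: this backend can migrate the share
        # itself and keep it writable while the first phase runs.
        return {'compatible': True, 'writable': True}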
def test_update_access(self): def test_update_access(self):
share_driver = driver.ShareDriver(True, configuration=None) share_driver = driver.ShareDriver(True, configuration=None)
self.assertRaises( self.assertRaises(
File diff suppressed because it is too large
@ -113,7 +113,7 @@ class ShareMigrationHelperTestCase(test.TestCase):
def test_create_instance_and_wait(self): def test_create_instance_and_wait(self):
host = {'host': 'fake_host'} host = 'fake_host'
share_instance_creating = db_utils.create_share_instance( share_instance_creating = db_utils.create_share_instance(
share_id=self.share['id'], status=constants.STATUS_CREATING, share_id=self.share['id'], status=constants.STATUS_CREATING,
@ -131,13 +131,13 @@ class ShareMigrationHelperTestCase(test.TestCase):
self.mock_object(time, 'sleep') self.mock_object(time, 'sleep')
# run # run
self.helper.create_instance_and_wait(self.share, self.helper.create_instance_and_wait(
share_instance_creating, host) self.share, share_instance_creating, host, 'fake_az_id')
# asserts # asserts
share_api.API.create_instance.assert_called_once_with( share_api.API.create_instance.assert_called_once_with(
self.context, self.share, self.share_instance['share_network_id'], self.context, self.share, self.share_instance['share_network_id'],
'fake_host') 'fake_host', 'fake_az_id')
db.share_instance_get.assert_has_calls([ db.share_instance_get.assert_has_calls([
mock.call(self.context, share_instance_creating['id'], mock.call(self.context, share_instance_creating['id'],
@ -149,7 +149,7 @@ class ShareMigrationHelperTestCase(test.TestCase):
def test_create_instance_and_wait_status_error(self): def test_create_instance_and_wait_status_error(self):
host = {'host': 'fake_host'} host = 'fake_host'
share_instance_error = db_utils.create_share_instance( share_instance_error = db_utils.create_share_instance(
share_id=self.share['id'], status=constants.STATUS_ERROR, share_id=self.share['id'], status=constants.STATUS_ERROR,
@ -165,12 +165,12 @@ class ShareMigrationHelperTestCase(test.TestCase):
# run # run
self.assertRaises(exception.ShareMigrationFailed, self.assertRaises(exception.ShareMigrationFailed,
self.helper.create_instance_and_wait, self.helper.create_instance_and_wait,
self.share, self.share_instance, host) self.share, self.share_instance, host, 'fake_az_id')
# asserts # asserts
share_api.API.create_instance.assert_called_once_with( share_api.API.create_instance.assert_called_once_with(
self.context, self.share, self.share_instance['share_network_id'], self.context, self.share, self.share_instance['share_network_id'],
'fake_host') 'fake_host', 'fake_az_id')
db.share_instance_get.assert_called_once_with( db.share_instance_get.assert_called_once_with(
self.context, share_instance_error['id'], with_share_data=True) self.context, share_instance_error['id'], with_share_data=True)
@ -180,7 +180,7 @@ class ShareMigrationHelperTestCase(test.TestCase):
def test_create_instance_and_wait_timeout(self): def test_create_instance_and_wait_timeout(self):
host = {'host': 'fake_host'} host = 'fake_host'
share_instance_creating = db_utils.create_share_instance( share_instance_creating = db_utils.create_share_instance(
share_id=self.share['id'], status=constants.STATUS_CREATING, share_id=self.share['id'], status=constants.STATUS_CREATING,
@ -204,12 +204,12 @@ class ShareMigrationHelperTestCase(test.TestCase):
# run # run
self.assertRaises(exception.ShareMigrationFailed, self.assertRaises(exception.ShareMigrationFailed,
self.helper.create_instance_and_wait, self.helper.create_instance_and_wait,
self.share, self.share_instance, host) self.share, self.share_instance, host, 'fake_az_id')
# asserts # asserts
share_api.API.create_instance.assert_called_once_with( share_api.API.create_instance.assert_called_once_with(
self.context, self.share, self.share_instance['share_network_id'], self.context, self.share, self.share_instance['share_network_id'],
'fake_host') 'fake_host', 'fake_az_id')
db.share_instance_get.assert_called_once_with( db.share_instance_get.assert_called_once_with(
self.context, share_instance_creating['id'], with_share_data=True) self.context, share_instance_creating['id'], with_share_data=True)
@ -219,6 +219,33 @@ class ShareMigrationHelperTestCase(test.TestCase):
self.helper.cleanup_new_instance.assert_called_once_with( self.helper.cleanup_new_instance.assert_called_once_with(
share_instance_creating) share_instance_creating)
@ddt.data(constants.STATUS_ACTIVE, constants.STATUS_ERROR,
constants.STATUS_CREATING)
def test_wait_for_share_server(self, status):
server = db_utils.create_share_server(status=status)
# mocks
self.mock_object(db, 'share_server_get',
mock.Mock(return_value=server))
# run
if status == constants.STATUS_ACTIVE:
result = self.helper.wait_for_share_server('fake_server_id')
self.assertEqual(server, result)
elif status == constants.STATUS_ERROR:
self.assertRaises(
exception.ShareServerNotCreated,
self.helper.wait_for_share_server, 'fake_server_id')
else:
self.mock_object(time, 'sleep')
self.assertRaises(
exception.ShareServerNotReady,
self.helper.wait_for_share_server, 'fake_server_id')
# asserts
db.share_server_get.assert_called_with(self.context, 'fake_server_id')
def test_change_to_read_only_with_ro_support(self): def test_change_to_read_only_with_ro_support(self):
share_instance = db_utils.create_share_instance( share_instance = db_utils.create_share_instance(
@ -49,7 +49,7 @@ class ShareRpcAPITestCase(test.TestCase):
share_server = db_utils.create_share_server() share_server = db_utils.create_share_server()
cg = {'id': 'fake_cg_id', 'host': 'fake_host'} cg = {'id': 'fake_cg_id', 'host': 'fake_host'}
cgsnapshot = {'id': 'fake_cg_id'} cgsnapshot = {'id': 'fake_cg_id'}
host = {'host': 'fake_host', 'capabilities': 1} host = 'fake_host'
self.fake_share = jsonutils.to_primitive(share) self.fake_share = jsonutils.to_primitive(share)
# mock out the getattr on the share db model object since jsonutils # mock out the getattr on the share db model object since jsonutils
# doesn't know about those extra attributes to pull in # doesn't know about those extra attributes to pull in
@ -101,7 +101,7 @@ class ShareRpcAPITestCase(test.TestCase):
expected_msg['snapshot_id'] = snapshot['id'] expected_msg['snapshot_id'] = snapshot['id']
if 'dest_host' in expected_msg: if 'dest_host' in expected_msg:
del expected_msg['dest_host'] del expected_msg['dest_host']
expected_msg['host'] = self.fake_host expected_msg['dest_host'] = self.fake_host
if 'share_replica' in expected_msg: if 'share_replica' in expected_msg:
share_replica = expected_msg.pop('share_replica', None) share_replica = expected_msg.pop('share_replica', None)
expected_msg['share_replica_id'] = share_replica['id'] expected_msg['share_replica_id'] = share_replica['id']
@ -110,6 +110,9 @@ class ShareRpcAPITestCase(test.TestCase):
snapshot = expected_msg.pop('replicated_snapshot', None) snapshot = expected_msg.pop('replicated_snapshot', None)
expected_msg['snapshot_id'] = snapshot['id'] expected_msg['snapshot_id'] = snapshot['id']
expected_msg['share_id'] = snapshot['share_id'] expected_msg['share_id'] = snapshot['share_id']
if 'src_share_instance' in expected_msg:
share_instance = expected_msg.pop('src_share_instance', None)
expected_msg['src_instance_id'] = share_instance['id']
if 'host' in kwargs: if 'host' in kwargs:
host = kwargs['host'] host = kwargs['host']
@ -123,8 +126,10 @@ class ShareRpcAPITestCase(test.TestCase):
host = kwargs['share_replica']['host'] host = kwargs['share_replica']['host']
elif 'replicated_snapshot' in kwargs: elif 'replicated_snapshot' in kwargs:
host = kwargs['share']['instance']['host'] host = kwargs['share']['instance']['host']
else: elif 'share' in kwargs:
host = kwargs['share']['host'] host = kwargs['share']['host']
else:
host = self.fake_host
target['server'] = host target['server'] = host
target['topic'] = '%s.%s' % (CONF.share_topic, host) target['topic'] = '%s.%s' % (CONF.share_topic, host)
@ -247,46 +252,48 @@ class ShareRpcAPITestCase(test.TestCase):
host='fake_host1') host='fake_host1')
def test_migration_start(self): def test_migration_start(self):
fake_dest_host = self.Desthost()
self._test_share_api('migration_start', self._test_share_api('migration_start',
rpc_method='cast', rpc_method='cast',
version='1.6', version='1.6',
share=self.fake_share, share=self.fake_share,
dest_host=fake_dest_host, dest_host='fake_host',
force_host_copy=True, force_host_copy=True,
notify=True) notify=True)
def test_migration_driver_recovery(self):
fake_dest_host = "host@backend"
self._test_share_api('migration_driver_recovery',
rpc_method='cast',
version='1.12',
share=self.fake_share,
host=fake_dest_host)
def test_migration_get_info(self): def test_migration_get_info(self):
self._test_share_api('migration_get_info', self._test_share_api('migration_get_info',
rpc_method='call', rpc_method='call',
version='1.6', version='1.6',
share_instance=self.fake_share) share_instance=self.fake_share)
def test_migration_get_driver_info(self):
self._test_share_api('migration_get_driver_info',
rpc_method='call',
version='1.6',
share_instance=self.fake_share)
def test_migration_complete(self): def test_migration_complete(self):
self._test_share_api('migration_complete', self._test_share_api('migration_complete',
rpc_method='cast', rpc_method='cast',
version='1.10', version='1.12',
share=self.fake_share, src_share_instance=self.fake_share['instance'],
share_instance_id='fake_ins_id', dest_instance_id='new_fake_ins_id')
new_share_instance_id='new_fake_ins_id')
def test_migration_cancel(self): def test_migration_cancel(self):
self._test_share_api('migration_cancel', self._test_share_api('migration_cancel',
rpc_method='call', rpc_method='cast',
version='1.10', version='1.12',
share=self.fake_share) src_share_instance=self.fake_share['instance'],
dest_instance_id='ins2_id')
def test_migration_get_progress(self): def test_migration_get_progress(self):
self._test_share_api('migration_get_progress', self._test_share_api('migration_get_progress',
rpc_method='call', rpc_method='call',
version='1.10', version='1.12',
share=self.fake_share) src_share_instance=self.fake_share['instance'],
dest_instance_id='ins2_id')
def test_delete_share_replica(self): def test_delete_share_replica(self):
self._test_share_api('delete_share_replica', self._test_share_api('delete_share_replica',
@ -338,6 +345,17 @@ class ShareRpcAPITestCase(test.TestCase):
force=False, force=False,
host='fake_host') host='fake_host')
class Desthost(object): def test_provide_share_server(self):
host = 'fake_host' self._test_share_api('provide_share_server',
capabilities = 1 rpc_method='call',
version='1.12',
share_instance=self.fake_share['instance'],
share_network_id='fake_network_id',
snapshot_id='fake_snapshot_id')
def test_create_share_server(self):
self._test_share_api('create_share_server',
rpc_method='cast',
version='1.12',
share_instance=self.fake_share['instance'],
share_server_id='fake_server_id')
@ -34,3 +34,19 @@ REPLICATION_STATE_OUT_OF_SYNC = 'out_of_sync'
RULE_STATE_ACTIVE = 'active' RULE_STATE_ACTIVE = 'active'
RULE_STATE_OUT_OF_SYNC = 'out_of_sync' RULE_STATE_OUT_OF_SYNC = 'out_of_sync'
RULE_STATE_ERROR = 'error' RULE_STATE_ERROR = 'error'
TASK_STATE_MIGRATION_STARTING = 'migration_starting'
TASK_STATE_MIGRATION_IN_PROGRESS = 'migration_in_progress'
TASK_STATE_MIGRATION_COMPLETING = 'migration_completing'
TASK_STATE_MIGRATION_SUCCESS = 'migration_success'
TASK_STATE_MIGRATION_ERROR = 'migration_error'
TASK_STATE_MIGRATION_CANCELLED = 'migration_cancelled'
TASK_STATE_MIGRATION_DRIVER_STARTING = 'migration_driver_starting'
TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS = 'migration_driver_in_progress'
TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE = 'migration_driver_phase1_done'
TASK_STATE_DATA_COPYING_STARTING = 'data_copying_starting'
TASK_STATE_DATA_COPYING_IN_PROGRESS = 'data_copying_in_progress'
TASK_STATE_DATA_COPYING_COMPLETING = 'data_copying_completing'
TASK_STATE_DATA_COPYING_COMPLETED = 'data_copying_completed'
TASK_STATE_DATA_COPYING_CANCELLED = 'data_copying_cancelled'
TASK_STATE_DATA_COPYING_ERROR = 'data_copying_error'
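Read together with the tests below, the two migration flows can be summarized as follows; the orderings are inferred from this change and are a sketch, not an exhaustive state machine:
# Inferred driver-assisted sequence (first phase, then explicit completion):
DRIVER_ASSISTED_FLOW = (
    TASK_STATE_MIGRATION_STARTING,
    TASK_STATE_MIGRATION_DRIVER_STARTING,
    TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
    TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
    TASK_STATE_MIGRATION_COMPLETING,
    TASK_STATE_MIGRATION_SUCCESS,
)

# Inferred fallback (host data copy) sequence handled by the data service:
DATA_COPY_FLOW = (
    TASK_STATE_MIGRATION_STARTING,
    TASK_STATE_DATA_COPYING_STARTING,
    TASK_STATE_DATA_COPYING_IN_PROGRESS,
    TASK_STATE_DATA_COPYING_COMPLETING,
    TASK_STATE_DATA_COPYING_COMPLETED,
    TASK_STATE_MIGRATION_COMPLETING,
    TASK_STATE_MIGRATION_SUCCESS,
)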
@ -14,6 +14,7 @@
# under the License. # under the License.
import json import json
import six
import time import time
from six.moves.urllib import parse as urlparse from six.moves.urllib import parse as urlparse
@ -688,8 +689,11 @@ class SharesV2Client(shares_client.SharesClient):
############### ###############
def list_share_types(self, params=None, version=LATEST_MICROVERSION): def list_share_types(self, params=None, default=False,
version=LATEST_MICROVERSION):
uri = 'types' uri = 'types'
if default:
uri += '/default'
if params is not None: if params is not None:
uri += '?%s' % urlparse.urlencode(params) uri += '?%s' % urlparse.urlencode(params)
resp, body = self.get(uri, version=version) resp, body = self.get(uri, version=version)
@@ -1076,22 +1080,25 @@ class SharesV2Client(shares_client.SharesClient):
                   headers=EXPERIMENTAL, extra_headers=True,
                   version=version)

-    def wait_for_migration_status(self, share_id, dest_host, status,
+    def wait_for_migration_status(self, share_id, dest_host, status_to_wait,
                                   version=LATEST_MICROVERSION):
         """Waits for a share to migrate to a certain host."""
+        statuses = ((status_to_wait,)
+                    if not isinstance(status_to_wait, (tuple, list, set))
+                    else status_to_wait)
         share = self.get_share(share_id, version=version)
         migration_timeout = CONF.share.migration_timeout
         start = int(time.time())
-        while share['task_state'] != status:
+        while share['task_state'] not in statuses:
             time.sleep(self.build_interval)
             share = self.get_share(share_id, version=version)
-            if share['task_state'] == status:
-                return share
+            if share['task_state'] in statuses:
+                break
             elif share['task_state'] == 'migration_error':
                 raise share_exceptions.ShareMigrationException(
                     share_id=share['id'], src=share['host'], dest=dest_host)
             elif int(time.time()) - start >= migration_timeout:
-                message = ('Share %(share_id)s failed to reach status '
+                message = ('Share %(share_id)s failed to reach a status in '
                            '%(status)s when migrating from host %(src)s to '
                            'host %(dest)s within the required time '
                            '%(timeout)s.' % {
@@ -1099,9 +1106,10 @@ class SharesV2Client(shares_client.SharesClient):
                             'dest': dest_host,
                             'share_id': share['id'],
                             'timeout': self.build_timeout,
-                            'status': status,
+                            'status': six.text_type(statuses),
                         })
                 raise exceptions.TimeoutException(message)
+        return share

    ################
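Illustrative usage of the updated waiter (an assumed call site inside a tempest test, not part of this change): status_to_wait may now be a single task_state or a tuple/list/set of acceptable states, and the share dict is returned once any of them is reached:

    states = (constants.TASK_STATE_DATA_COPYING_COMPLETED,
              constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)
    share = self.shares_v2_client.wait_for_migration_status(
        share['id'], dest_pool, states, version='2.15')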

View File

@@ -16,6 +16,7 @@
 from tempest import config
 from tempest import test

+from manila_tempest_tests.common import constants
 from manila_tempest_tests.tests.api import base
 from manila_tempest_tests import utils

@@ -25,7 +26,7 @@ CONF = config.CONF
 class MigrationNFSTest(base.BaseSharesAdminTest):
     """Tests Share Migration.

-    Tests migration in multi-backend environment.
+    Tests share migration in multi-backend environment.
     """

     protocol = "nfs"
@@ -37,7 +38,36 @@ class MigrationNFSTest(base.BaseSharesAdminTest):
             message = "%s tests are disabled" % cls.protocol
             raise cls.skipException(message)
         if not CONF.share.run_migration_tests:
-            raise cls.skipException("Migration tests disabled. Skipping.")
+            raise cls.skipException("Share migration tests are disabled.")
+
+    @test.attr(type=[base.TAG_POSITIVE, base.TAG_BACKEND])
+    @base.skip_if_microversion_lt("2.15")
+    def test_migration_cancel(self):
+        share, dest_pool = self._setup_migration()
+
+        old_exports = self.shares_v2_client.list_share_export_locations(
+            share['id'], version='2.15')
+        self.assertNotEmpty(old_exports)
+        old_exports = [x['path'] for x in old_exports
+                       if x['is_admin_only'] is False]
+        self.assertNotEmpty(old_exports)
+
+        task_states = (constants.TASK_STATE_DATA_COPYING_COMPLETED,
+                       constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)
+
+        share = self.migrate_share(
+            share['id'], dest_pool, version='2.15', notify=False,
+            wait_for_status=task_states)
+
+        self._validate_migration_successful(
+            dest_pool, share, task_states, '2.15', notify=False)
+
+        share = self.migration_cancel(share['id'], dest_pool)
+
+        self._validate_migration_successful(
+            dest_pool, share, constants.TASK_STATE_MIGRATION_CANCELLED,
+            '2.15', notify=False)

     @test.attr(type=[base.TAG_POSITIVE, base.TAG_BACKEND])
     @base.skip_if_microversion_lt("2.5")
@@ -45,12 +75,11 @@
         share, dest_pool = self._setup_migration()

-        old_exports = share['export_locations']
-
         share = self.migrate_share(share['id'], dest_pool, version='2.5')

-        self._validate_migration_successful(dest_pool, share, old_exports,
-                                            version='2.5')
+        self._validate_migration_successful(
+            dest_pool, share, constants.TASK_STATE_MIGRATION_SUCCESS,
+            version='2.5')

     @test.attr(type=[base.TAG_POSITIVE, base.TAG_BACKEND])
     @base.skip_if_microversion_lt("2.15")
@@ -65,26 +94,29 @@
                        if x['is_admin_only'] is False]
         self.assertNotEmpty(old_exports)

+        task_states = (constants.TASK_STATE_DATA_COPYING_COMPLETED,
+                       constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)
+
         share = self.migrate_share(
             share['id'], dest_pool, version='2.15', notify=False,
-            wait_for_status='data_copying_completed')
+            wait_for_status=task_states)

-        self._validate_migration_successful(dest_pool, share,
-                                            old_exports, '2.15', notify=False)
+        self._validate_migration_successful(
+            dest_pool, share, task_states, '2.15', notify=False)

         share = self.migration_complete(share['id'], dest_pool, version='2.15')

-        self._validate_migration_successful(dest_pool, share, old_exports,
-                                            version='2.15')
+        self._validate_migration_successful(
+            dest_pool, share, constants.TASK_STATE_MIGRATION_SUCCESS,
+            version='2.15')

     def _setup_migration(self):

-        pools = self.shares_client.list_pools()['pools']
+        pools = self.shares_v2_client.list_pools(detail=True)['pools']
         if len(pools) < 2:
-            raise self.skipException("At least two different pool entries "
-                                     "are needed to run migration tests. "
-                                     "Skipping.")
+            raise self.skipException("At least two different pool entries are "
+                                     "needed to run share migration tests.")

         share = self.create_share(self.protocol)
         share = self.shares_client.get_share(share['id'])
@@ -101,8 +133,10 @@
         self.shares_v2_client.wait_for_share_status(
             share['id'], 'active', status_attr='access_rules_status')

-        dest_pool = next((x for x in pools if x['name'] != share['host']),
-                         None)
+        default_type = self.shares_v2_client.list_share_types(
+            default=True)['share_type']
+
+        dest_pool = utils.choose_matching_backend(share, pools, default_type)

         self.assertIsNotNone(dest_pool)
         self.assertIsNotNone(dest_pool.get('name'))
@@ -112,7 +146,12 @@
         return share, dest_pool

     def _validate_migration_successful(self, dest_pool, share,
-                                       old_exports, version, notify=True):
+                                       status_to_wait, version, notify=True):
+
+        statuses = ((status_to_wait,)
+                    if not isinstance(status_to_wait, (tuple, list, set))
+                    else status_to_wait)
+
         if utils.is_microversion_lt(version, '2.9'):
             new_exports = share['export_locations']
             self.assertNotEmpty(new_exports)
@@ -127,12 +166,7 @@
         # Share migrated
         if notify:
             self.assertEqual(dest_pool, share['host'])
-            for export in old_exports:
-                self.assertFalse(export in new_exports)
-            self.assertEqual('migration_success', share['task_state'])
         # Share not migrated yet
         else:
             self.assertNotEqual(dest_pool, share['host'])
-            for export in old_exports:
-                self.assertTrue(export in new_exports)
-            self.assertEqual('data_copying_completed', share['task_state'])
+        self.assertIn(share['task_state'], statuses)
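Condensed sketch of the 2-phase flow these tests exercise (illustrative only, mirroring the calls above): start the migration without notify, wait for either first-phase completion state, then either complete or cancel:

    phase1_states = (constants.TASK_STATE_DATA_COPYING_COMPLETED,
                     constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)
    share = self.migrate_share(share['id'], dest_pool, version='2.15',
                               notify=False, wait_for_status=phase1_states)
    share = self.migration_complete(share['id'], dest_pool, version='2.15')
    # or, to roll back instead of completing:
    # share = self.migration_cancel(share['id'], dest_pool)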

View File

@@ -18,7 +18,10 @@ from tempest.lib import exceptions as lib_exc
 from tempest import test
 import testtools

+from manila_tempest_tests.common import constants
+from manila_tempest_tests import share_exceptions
 from manila_tempest_tests.tests.api import base
+from manila_tempest_tests import utils

 CONF = config.CONF

@@ -26,7 +29,7 @@ CONF = config.CONF
 class MigrationNFSTest(base.BaseSharesAdminTest):
     """Tests Share Migration.

-    Tests migration in multi-backend environment.
+    Tests share migration in multi-backend environment.
     """

     protocol = "nfs"
@@ -35,18 +38,28 @@
     def resource_setup(cls):
         super(MigrationNFSTest, cls).resource_setup()
         if not CONF.share.run_migration_tests:
-            raise cls.skipException("Migration tests disabled. Skipping.")
+            raise cls.skipException("Share migration tests are disabled.")

-        cls.share = cls.create_share(cls.protocol)
-        cls.share = cls.shares_client.get_share(cls.share['id'])
-        pools = cls.shares_client.list_pools()['pools']
+        pools = cls.shares_client.list_pools(detail=True)['pools']
         if len(pools) < 2:
             raise cls.skipException("At least two different pool entries "
-                                    "are needed to run migration tests. "
-                                    "Skipping.")
+                                    "are needed to run share migration tests.")

-        cls.dest_pool = next((x for x in pools
-                              if x['name'] != cls.share['host']), None)
+        cls.share = cls.create_share(cls.protocol)
+        cls.share = cls.shares_client.get_share(cls.share['id'])
+
+        default_type = cls.shares_v2_client.list_share_types(
+            default=True)['share_type']
+
+        dest_pool = utils.choose_matching_backend(
+            cls.share, pools, default_type)
+
+        if not dest_pool or dest_pool.get('name') is None:
+            raise share_exceptions.ShareMigrationException(
+                "No valid pool entries to run share migration tests.")
+
+        cls.dest_pool = dest_pool['name']

     @test.attr(type=[base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND])
     @base.skip_if_microversion_lt("2.15")
@@ -91,10 +104,14 @@
     @test.attr(type=[base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND])
     @base.skip_if_microversion_lt("2.5")
     def test_migrate_share_not_available_v2_5(self):
-        self.shares_client.reset_state(self.share['id'], 'error')
-        self.shares_client.wait_for_share_status(self.share['id'], 'error')
+        self.shares_client.reset_state(
+            self.share['id'], constants.STATUS_ERROR)
+        self.shares_client.wait_for_share_status(self.share['id'],
+                                                 constants.STATUS_ERROR)
         self.assertRaises(
             lib_exc.BadRequest, self.shares_v2_client.migrate_share,
             self.share['id'], self.dest_pool, True, version='2.5')
-        self.shares_client.reset_state(self.share['id'], 'available')
-        self.shares_client.wait_for_share_status(self.share['id'], 'available')
+        self.shares_client.reset_state(self.share['id'],
+                                       constants.STATUS_AVAILABLE)
+        self.shares_client.wait_for_share_status(self.share['id'],
+                                                 constants.STATUS_AVAILABLE)

View File

@@ -419,6 +419,14 @@
             version=kwargs.get('version'))
         return share

+    @classmethod
+    def migration_cancel(cls, share_id, dest_host, client=None, **kwargs):
+        client = client or cls.shares_v2_client
+        client.migration_cancel(share_id, **kwargs)
+        share = client.wait_for_migration_status(
+            share_id, dest_host, 'migration_cancelled', **kwargs)
+        return share
+
     @classmethod
     def create_share(cls, *args, **kwargs):
         """Create one share and wait for available state. Retry if allowed."""

View File

@@ -20,7 +20,9 @@ from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
 from tempest.lib import exceptions
 from tempest import test
+import testtools

+from manila_tempest_tests.common import constants
 from manila_tempest_tests.tests.api import base
 from manila_tempest_tests.tests.scenario import manager_share as manager
 from manila_tempest_tests import utils
@@ -244,29 +246,30 @@ class ShareBasicOpsBase(manager.ShareScenarioTest):

     @test.services('compute', 'network')
     @test.attr(type=[base.TAG_POSITIVE, base.TAG_BACKEND])
+    @testtools.skipUnless(CONF.share.run_migration_tests,
+                          "Share migration tests are disabled.")
     def test_migration_files(self):

         if self.protocol == "CIFS":
             raise self.skipException("Test for CIFS protocol not supported "
-                                     "at this moment. Skipping.")
+                                     "at this moment.")

-        if not CONF.share.run_migration_tests:
-            raise self.skipException("Migration tests disabled. Skipping.")
-
-        pools = self.shares_admin_client.list_pools()['pools']
+        pools = self.shares_admin_v2_client.list_pools(detail=True)['pools']

         if len(pools) < 2:
-            raise self.skipException("At least two different pool entries "
-                                     "are needed to run migration tests. "
-                                     "Skipping.")
+            raise self.skipException("At least two different pool entries are "
+                                     "needed to run share migration tests.")

         instance = self.boot_instance(wait_until="BUILD")
         self.create_share()
         instance = self.wait_for_active_instance(instance["id"])

-        share = self.shares_client.get_share(self.share['id'])
+        self.share = self.shares_client.get_share(self.share['id'])

-        dest_pool = next((x for x in pools if x['name'] != share['host']),
-                         None)
+        default_type = self.shares_v2_client.list_share_types(
+            default=True)['share_type']
+
+        dest_pool = utils.choose_matching_backend(
+            self.share, pools, default_type)

         self.assertIsNotNone(dest_pool)
         self.assertIsNotNone(dest_pool.get('name'))
@@ -307,7 +310,7 @@ class ShareBasicOpsBase(manager.ShareScenarioTest):

         self.umount_share(ssh_client)

-        share = self.migrate_share(share['id'], dest_pool)
+        self.share = self.migrate_share(self.share['id'], dest_pool)

         if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
             new_locations = self.share['export_locations']
         else:
@@ -315,11 +318,12 @@
                 self.share['id'])
             new_locations = [x['path'] for x in new_exports]

-        self.assertEqual(dest_pool, share['host'])
+        self.assertEqual(dest_pool, self.share['host'])

         locations.sort()
         new_locations.sort()
         self.assertNotEqual(locations, new_locations)
-        self.assertEqual('migration_success', share['task_state'])
+        self.assertEqual(constants.TASK_STATE_MIGRATION_SUCCESS,
+                         self.share['task_state'])

         self.mount_share(new_locations[0], ssh_client)

View File

@@ -100,3 +100,18 @@ def rand_ip():
     TEST_NET_3 = '203.0.113.'
     final_octet = six.text_type(random.randint(0, 255))
     return TEST_NET_3 + final_octet
+
+
+def choose_matching_backend(share, pools, share_type):
+    extra_specs = {}
+    # fix extra specs with string values instead of boolean
+    for k, v in share_type['extra_specs'].items():
+        extra_specs[k] = (True if six.text_type(v).lower() == 'true'
+                          else False if six.text_type(v).lower() == 'false'
+                          else v)
+    selected_pool = next(
+        (x for x in pools if (x['name'] != share['host'] and all(
+            y in x['capabilities'].items() for y in extra_specs.items()))),
+        None)
+    return selected_pool
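The migration tests call this helper as shown below (an illustrative excerpt mirroring the test code above, not additional functionality): the candidate pools and the default share type come from the v2 client, and the helper picks a pool on a different host whose capabilities satisfy the type's extra specs:

    pools = self.shares_v2_client.list_pools(detail=True)['pools']
    default_type = self.shares_v2_client.list_share_types(
        default=True)['share_type']
    dest_pool = utils.choose_matching_backend(share, pools, default_type)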