From c7fe51e79b63f3e0556cce19a48252aaee317823 Mon Sep 17 00:00:00 2001 From: Rodrigo Barbieri Date: Wed, 24 Aug 2016 22:01:31 -0300 Subject: [PATCH] Fix Share Migration improper behavior for drivers Tempest tests were not appropriate for driver-assisted migration, so this was fixed. Also, improved docstrings and fixed workflow for drivers when implementing 2-phase migration to be accurate with tempest and handle AZs, which were previously locked to the source share's AZ. Driver-assisted migration now creates an additional share instance to better handle and support driver methods. Updated allow_access and deny_access APIs to allow users to mount migrating shares before issuing 'migration-complete'. APIImpact Closes-bug: #1594922 Change-Id: If4bfaf7e9d963b83c13a6fea241c2eda14f7f409 --- manila/api/views/shares.py | 8 +- manila/data/manager.py | 8 +- manila/exception.py | 9 + manila/scheduler/manager.py | 6 +- manila/share/api.py | 241 ++++- manila/share/driver.py | 207 ++-- manila/share/manager.py | 635 +++++++++---- manila/share/migration.py | 22 +- manila/share/rpcapi.py | 74 +- manila/tests/api/v2/test_shares.py | 5 +- manila/tests/data/test_manager.py | 4 +- manila/tests/scheduler/test_manager.py | 9 +- manila/tests/share/test_api.py | 305 +++++- manila/tests/share/test_driver.py | 43 +- manila/tests/share/test_manager.py | 890 +++++++++++++----- manila/tests/share/test_migration.py | 47 +- manila/tests/share/test_rpcapi.py | 64 +- manila_tempest_tests/common/constants.py | 16 + .../services/share/v2/json/shares_client.py | 22 +- .../tests/api/admin/test_migration.py | 82 +- .../api/admin/test_migration_negative.py | 43 +- manila_tempest_tests/tests/api/base.py | 8 + .../tests/scenario/test_share_basic_ops.py | 32 +- manila_tempest_tests/utils.py | 15 + 24 files changed, 2074 insertions(+), 721 deletions(-) diff --git a/manila/api/views/shares.py b/manila/api/views/shares.py index 0e3e569cb4..20c4f95370 100644 --- a/manila/api/views/shares.py +++ 
b/manila/api/views/shares.py @@ -96,11 +96,9 @@ class ViewBuilder(common.ViewBuilder): return {'share': share_dict} def migration_get_progress(self, progress): - result = { - 'total_progress': progress['total_progress'], - 'current_file_path': progress['current_file_path'], - 'current_file_progress': progress['current_file_progress'] - } + + result = {'total_progress': progress['total_progress']} + return result @common.ViewBuilder.versioned_method("2.2") diff --git a/manila/data/manager.py b/manila/data/manager.py index e61bee362d..895d455410 100644 --- a/manila/data/manager.py +++ b/manila/data/manager.py @@ -73,6 +73,8 @@ class DataManager(manager.Manager): 'dest_instance_id': dest_share_instance_id}) share_ref = self.db.share_get(context, share_id) + share_instance_ref = self.db.share_instance_get( + context, share_instance_id, with_share_data=True) share_rpcapi = share_rpc.ShareAPI() @@ -90,7 +92,7 @@ class DataManager(manager.Manager): migration_info_dest) except exception.ShareDataCopyCancelled: share_rpcapi.migration_complete( - context, share_ref, share_instance_id, dest_share_instance_id) + context, share_instance_ref, dest_share_instance_id) return except Exception: self.db.share_update( @@ -101,7 +103,7 @@ class DataManager(manager.Manager): 'dest': dest_share_instance_id} LOG.exception(msg) share_rpcapi.migration_complete( - context, share_ref, share_instance_id, dest_share_instance_id) + context, share_instance_ref, dest_share_instance_id) raise exception.ShareDataCopyFailed(reason=msg) finally: self.busy_tasks_shares.pop(share_id, None) @@ -121,7 +123,7 @@ class DataManager(manager.Manager): 'dest_instance_id': dest_share_instance_id}) share_rpcapi.migration_complete( - context, share_ref, share_instance_id, dest_share_instance_id) + context, share_instance_ref, dest_share_instance_id) def data_copy_cancel(self, context, share_id): LOG.info(_LI("Received request to cancel share migration " diff --git a/manila/exception.py b/manila/exception.py index 
7ca784ed36..faca680170 100644 --- a/manila/exception.py +++ b/manila/exception.py @@ -242,6 +242,10 @@ class InvalidShareServer(Invalid): message = _("Share server %(share_server_id)s is not valid.") +class ShareMigrationError(ManilaException): + message = _("Error in share migration: %(reason)s") + + class ShareMigrationFailed(ManilaException): message = _("Share migration failed: %(reason)s") @@ -267,6 +271,11 @@ class ShareServerNotCreated(ManilaException): message = _("Share server %(share_server_id)s failed on creation.") +class ShareServerNotReady(ManilaException): + message = _("Share server %(share_server_id)s failed to reach '%(state)s' " + "within %(time)s seconds.") + + class ServiceNotFound(NotFound): message = _("Service %(service_id)s could not be found.") diff --git a/manila/scheduler/manager.py b/manila/scheduler/manager.py index ac294a8ee5..9f2e3480cb 100644 --- a/manila/scheduler/manager.py +++ b/manila/scheduler/manager.py @@ -159,9 +159,6 @@ class SchedulerManager(manager.Manager): request_spec, filter_properties) - except exception.NoValidHost as ex: - with excutils.save_and_reraise_exception(): - _migrate_share_set_error(self, context, ex, request_spec) except Exception as ex: with excutils.save_and_reraise_exception(): _migrate_share_set_error(self, context, ex, request_spec) @@ -169,7 +166,8 @@ class SchedulerManager(manager.Manager): share_ref = db.share_get(context, share_id) try: share_rpcapi.ShareAPI().migration_start( - context, share_ref, tgt_host, force_host_copy, notify) + context, share_ref, tgt_host.host, force_host_copy, + notify) except Exception as ex: with excutils.save_and_reraise_exception(): _migrate_share_set_error(self, context, ex, request_spec) diff --git a/manila/share/api.py b/manila/share/api.py index ae068fd2e0..3dbff7fc70 100644 --- a/manila/share/api.py +++ b/manila/share/api.py @@ -271,7 +271,7 @@ class API(base.Base): policy.check_policy(context, 'share', 'create') request_spec, share_instance = ( - 
self._create_share_instance_and_get_request_spec( + self.create_share_instance_and_get_request_spec( context, share, availability_zone=availability_zone, consistency_group=consistency_group, host=host, share_network_id=share_network_id)) @@ -307,7 +307,7 @@ class API(base.Base): return share_instance - def _create_share_instance_and_get_request_spec( + def create_share_instance_and_get_request_spec( self, context, share, availability_zone=None, consistency_group=None, host=None, share_network_id=None): @@ -393,7 +393,7 @@ class API(base.Base): raise exception.ReplicationException(reason=msg % share['id']) request_spec, share_replica = ( - self._create_share_instance_and_get_request_spec( + self.create_share_instance_and_get_request_spec( context, share, availability_zone=availability_zone, share_network_id=share_network_id)) @@ -874,7 +874,7 @@ class API(base.Base): return snapshot - def migration_start(self, context, share, host, force_host_copy, + def migration_start(self, context, share, dest_host, force_host_copy, notify=True): """Migrates share to a new host.""" @@ -899,10 +899,10 @@ class API(base.Base): self._check_is_share_busy(share) # Make sure the destination host is different than the current one - if host == share_instance['host']: + if dest_host == share_instance['host']: msg = _('Destination host %(dest_host)s must be different ' 'than the current host %(src_host)s.') % { - 'dest_host': host, + 'dest_host': dest_host, 'src_host': share_instance['host']} raise exception.InvalidHost(reason=msg) @@ -912,8 +912,23 @@ class API(base.Base): msg = _("Share %s must not have snapshots.") % share['id'] raise exception.InvalidShare(reason=msg) + dest_host_host = share_utils.extract_host(dest_host) + # Make sure the host is in the list of available hosts - utils.validate_service_host(context, share_utils.extract_host(host)) + utils.validate_service_host(context, dest_host_host) + + service = self.db.service_get_by_args( + context, dest_host_host, 'manila-share') 
+ + share_type = {} + share_type_id = share['share_type_id'] + if share_type_id: + share_type = share_types.get_share_type(context, share_type_id) + + request_spec = self._get_request_spec_dict( + share, + share_type, + availability_zone_id=service['availability_zone_id']) # NOTE(ganso): there is the possibility of an error between here and # manager code, which will cause the share to be stuck in @@ -925,21 +940,14 @@ class API(base.Base): context, share, {'task_state': constants.TASK_STATE_MIGRATION_STARTING}) - share_type = {} - share_type_id = share['share_type_id'] - if share_type_id: - share_type = share_types.get_share_type(context, share_type_id) - - request_spec = self._get_request_spec_dict(share, share_type) - try: - self.scheduler_rpcapi.migrate_share_to_host(context, share['id'], - host, force_host_copy, - notify, request_spec) + self.scheduler_rpcapi.migrate_share_to_host( + context, share['id'], dest_host, force_host_copy, notify, + request_spec) except Exception: msg = _('Destination host %(dest_host)s did not pass validation ' 'for migration of share %(share)s.') % { - 'dest_host': host, + 'dest_host': dest_host, 'share': share['id']} raise exception.InvalidHost(reason=msg) @@ -948,64 +956,150 @@ class API(base.Base): if share['task_state'] not in ( constants.TASK_STATE_DATA_COPYING_COMPLETED, constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE): - msg = _("First migration phase of share %s not completed" - " yet.") % share['id'] + msg = self._migration_validate_error_message(share) + if msg is None: + msg = _("First migration phase of share %s not completed" + " yet.") % share['id'] LOG.error(msg) raise exception.InvalidShare(reason=msg) + share_instance_id, new_share_instance_id = ( + self.get_migrating_instances(share)) + + share_instance_ref = self.db.share_instance_get( + context, share_instance_id, with_share_data=True) + + self.share_rpcapi.migration_complete(context, share_instance_ref, + new_share_instance_id) + + def 
get_migrating_instances(self, share): + share_instance_id = None new_share_instance_id = None - if share['task_state'] == ( - constants.TASK_STATE_DATA_COPYING_COMPLETED): + for instance in share.instances: + if instance['status'] == constants.STATUS_MIGRATING: + share_instance_id = instance['id'] + if instance['status'] == constants.STATUS_MIGRATING_TO: + new_share_instance_id = instance['id'] - for instance in share.instances: - if instance['status'] == constants.STATUS_MIGRATING: - share_instance_id = instance['id'] - if instance['status'] == constants.STATUS_MIGRATING_TO: - new_share_instance_id = instance['id'] + if None in (share_instance_id, new_share_instance_id): + msg = _("Share instances %(instance_id)s and " + "%(new_instance_id)s in inconsistent states, cannot" + " continue share migration for share %(share_id)s" + ".") % {'instance_id': share_instance_id, + 'new_instance_id': new_share_instance_id, + 'share_id': share['id']} + raise exception.ShareMigrationFailed(reason=msg) - if None in (share_instance_id, new_share_instance_id): - msg = _("Share instances %(instance_id)s and " - "%(new_instance_id)s in inconsistent states, cannot" - " continue share migration for share %(share_id)s" - ".") % {'instance_id': share_instance_id, - 'new_instance_id': new_share_instance_id, - 'share_id': share['id']} - raise exception.ShareMigrationFailed(reason=msg) - - share_rpc = share_rpcapi.ShareAPI() - share_rpc.migration_complete(context, share, share_instance_id, - new_share_instance_id) + return share_instance_id, new_share_instance_id def migration_get_progress(self, context, share): if share['task_state'] == ( constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): - share_rpc = share_rpcapi.ShareAPI() - return share_rpc.migration_get_progress(context, share) + share_instance_id, migrating_instance_id = ( + self.get_migrating_instances(share)) + + share_instance_ref = self.db.share_instance_get( + context, share_instance_id, with_share_data=True) + + service_host 
= share_utils.extract_host(share_instance_ref['host']) + + service = self.db.service_get_by_args( + context, service_host, 'manila-share') + + if utils.service_is_up(service): + try: + result = self.share_rpcapi.migration_get_progress( + context, share_instance_ref, migrating_instance_id) + except Exception: + msg = _("Failed to obtain migration progress of share " + "%s.") % share['id'] + LOG.exception(msg) + raise exception.ShareMigrationError(reason=msg) + else: + result = None elif share['task_state'] == ( constants.TASK_STATE_DATA_COPYING_IN_PROGRESS): data_rpc = data_rpcapi.DataAPI() LOG.info(_LI("Sending request to get share migration information" " of share %s.") % share['id']) - return data_rpc.data_copy_get_progress(context, share['id']) + + services = self.db.service_get_all_by_topic(context, 'manila-data') + + if len(services) > 0 and utils.service_is_up(services[0]): + + try: + result = data_rpc.data_copy_get_progress( + context, share['id']) + except Exception: + msg = _("Failed to obtain migration progress of share " + "%s.") % share['id'] + LOG.exception(msg) + raise exception.ShareMigrationError(reason=msg) + else: + result = None else: - msg = _("Migration of share %s data copy progress cannot be " - "obtained at this moment.") % share['id'] + result = None + + if not (result and result.get('total_progress') is not None): + msg = self._migration_validate_error_message(share) + if msg is None: + msg = _("Migration progress of share %s cannot be obtained at " + "this moment.") % share['id'] LOG.error(msg) raise exception.InvalidShare(reason=msg) + return result + + def _migration_validate_error_message(self, share): + + task_state = share['task_state'] + if task_state == constants.TASK_STATE_MIGRATION_SUCCESS: + msg = _("Migration of share %s has already " + "completed.") % share['id'] + elif task_state in (None, constants.TASK_STATE_MIGRATION_ERROR): + msg = _("There is no migration being performed for share %s " + "at this moment.") % share['id'] 
+ elif task_state == constants.TASK_STATE_MIGRATION_CANCELLED: + msg = _("Migration of share %s was already " + "cancelled.") % share['id'] + elif task_state in (constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, + constants.TASK_STATE_DATA_COPYING_COMPLETED): + msg = _("Migration of share %s has already completed first " + "phase.") % share['id'] + else: + return None + return msg + def migration_cancel(self, context, share): - if share['task_state'] == ( + migrating = True + if share['task_state'] in ( + constants.TASK_STATE_DATA_COPYING_COMPLETED, + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): - share_rpc = share_rpcapi.ShareAPI() - share_rpc.migration_cancel(context, share) + share_instance_id, migrating_instance_id = ( + self.get_migrating_instances(share)) + + share_instance_ref = self.db.share_instance_get( + context, share_instance_id, with_share_data=True) + + service_host = share_utils.extract_host(share_instance_ref['host']) + + service = self.db.service_get_by_args( + context, service_host, 'manila-share') + + if utils.service_is_up(service): + self.share_rpcapi.migration_cancel( + context, share_instance_ref, migrating_instance_id) + else: + migrating = False elif share['task_state'] == ( constants.TASK_STATE_DATA_COPYING_IN_PROGRESS): @@ -1013,11 +1107,28 @@ class API(base.Base): data_rpc = data_rpcapi.DataAPI() LOG.info(_LI("Sending request to cancel migration of " "share %s.") % share['id']) - data_rpc.data_copy_cancel(context, share['id']) + + services = self.db.service_get_all_by_topic(context, 'manila-data') + + if len(services) > 0 and utils.service_is_up(services[0]): + try: + data_rpc.data_copy_cancel(context, share['id']) + except Exception: + msg = _("Failed to cancel migration of share " + "%s.") % share['id'] + LOG.exception(msg) + raise exception.ShareMigrationError(reason=msg) + else: + migrating = False else: - msg = _("Data copy for migration of share %s cannot be cancelled" - " 
at this moment.") % share['id'] + migrating = False + + if not migrating: + msg = self._migration_validate_error_message(share) + if msg is None: + msg = _("Migration of share %s cannot be cancelled at this " + "moment.") % share['id'] LOG.error(msg) raise exception.InvalidShare(reason=msg) @@ -1186,7 +1297,20 @@ class API(base.Base): policy.check_policy(ctx, 'share', 'allow_access') share = self.db.share_get(ctx, share['id']) if share['status'] != constants.STATUS_AVAILABLE: - msg = _("Share status must be %s") % constants.STATUS_AVAILABLE + if not (share['status'] in (constants.STATUS_MIGRATING, + constants.STATUS_MIGRATING_TO) and + share['task_state'] in ( + constants.TASK_STATE_DATA_COPYING_ERROR, + constants.TASK_STATE_MIGRATION_ERROR, + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, + constants.TASK_STATE_DATA_COPYING_COMPLETED)): + msg = _("Share status must be %(available)s, or %(migrating)s " + "while first phase of migration is completed.") % { + 'available': constants.STATUS_AVAILABLE, + 'migrating': constants.STATUS_MIGRATING + } + else: + msg = _("Share status must be %s") % constants.STATUS_AVAILABLE raise exception.InvalidShare(reason=msg) values = { 'share_id': share['id'], @@ -1258,7 +1382,20 @@ class API(base.Base): msg = _("Share doesn't have any instances") raise exception.InvalidShare(reason=msg) if share['status'] != constants.STATUS_AVAILABLE: - msg = _("Share status must be %s") % constants.STATUS_AVAILABLE + if not (share['status'] in (constants.STATUS_MIGRATING, + constants.STATUS_MIGRATING_TO) and + share['task_state'] in ( + constants.TASK_STATE_DATA_COPYING_ERROR, + constants.TASK_STATE_MIGRATION_ERROR, + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, + constants.TASK_STATE_DATA_COPYING_COMPLETED)): + msg = _("Share status must be %(available)s, or %(migrating)s " + "while first phase of migration is completed.") % { + 'available': constants.STATUS_AVAILABLE, + 'migrating': constants.STATUS_MIGRATING + } + else: + msg = 
_("Share status must be %s") % constants.STATUS_AVAILABLE raise exception.InvalidShare(reason=msg) for share_instance in share.instances: diff --git a/manila/share/driver.py b/manila/share/driver.py index ab94e09fb2..314fd40e65 100644 --- a/manila/share/driver.py +++ b/manila/share/driver.py @@ -316,86 +316,156 @@ class ShareDriver(object): {'actual': self.driver_handles_share_servers, 'allowed': driver_handles_share_servers}) - def migration_start(self, context, share_ref, share_server, host, - dest_driver_migration_info, notify): - """Is called to perform 1st phase of driver migration of a given share. + def migration_check_compatibility( + self, context, source_share, destination_share, + share_server=None, destination_share_server=None): + """Checks destination compatibility for migration of a given share. + + .. note:: + Is called to test compatibility with destination backend. + + Based on destination_driver_migration_info, driver should check if it + is compatible with destination backend so optimized migration can + proceed. + + :param context: The 'context.RequestContext' object for the request. + :param source_share: Reference to the share to be migrated. + :param destination_share: Reference to the share model to be used by + migrated share. + :param share_server: Share server model or None. + :param destination_share_server: Destination Share server model or + None. + :return: A dictionary containing values indicating if destination + backend is compatible and if share can remain writable during + migration. + + Example:: + + { + 'compatible': True, + 'writable': True, + } + """ + return { + 'compatible': False, + 'writable': False, + } + + def migration_start( + self, context, source_share, destination_share, + share_server=None, destination_share_server=None): + """Starts migration of a given share to another host. + + .. note:: + Is called in source share's backend to start migration. 
Driver should implement this method if willing to perform migration - in an optimized way, useful for when driver understands destination - backend. + in an optimized way, useful for when source share's backend driver + is compatible with destination backend driver. This method should + start the migration procedure in the backend and end. Following steps + should be done in 'migration_continue'. + :param context: The 'context.RequestContext' object for the request. - :param share_ref: Reference to the share being migrated. + :param source_share: Reference to the original share model. + :param destination_share: Reference to the share model to be used by + migrated share. :param share_server: Share server model or None. - :param host: Destination host and its capabilities. - :param dest_driver_migration_info: Migration information provided by - destination host. - :param notify: whether the migration should complete or wait for - 2nd phase call. Driver may throw exception when validating this - parameter, exception if does not support 1-phase or 2-phase approach. - :returns: Boolean value indicating if driver migration succeeded. - :returns: Dictionary containing a model update with relevant data to - be updated after migration, such as export locations. - """ - return None, None - - def migration_complete(self, context, share_ref, share_server, - dest_driver_migration_info): - """Is called to perform 2nd phase of driver migration of a given share. - - If driver is implementing 2-phase migration, this method should - perform tasks related to the 2nd phase of migration, thus completing - it. - :param context: The 'context.RequestContext' object for the request. - :param share_ref: Reference to the share being migrated. - :param share_server: Share server model or None. - :param dest_driver_migration_info: Migration information provided by - destination host. 
- :returns: Dictionary containing a model update with relevant data to - be updated after migration, such as export locations. - """ - return None - - def migration_cancel(self, context, share_ref, share_server, - dest_driver_migration_info): - """Is called to cancel driver migration. - - If possible, driver can implement a way to cancel an in-progress - migration. - :param context: The 'context.RequestContext' object for the request. - :param share_ref: Reference to the share being migrated. - :param share_server: Share server model or None. - :param dest_driver_migration_info: Migration information provided by - destination host. + :param destination_share_server: Destination Share server model or + None. """ raise NotImplementedError() - def migration_get_progress(self, context, share_ref, share_server, - dest_driver_migration_info): - """Is called to get migration progress. + def migration_continue( + self, context, source_share, destination_share, + share_server=None, destination_share_server=None): + """Continues migration of a given share to another host. + + .. note:: + Is called in source share's backend to continue migration. + + Driver should implement this method to continue monitor the migration + progress in storage and perform following steps until 1st phase is + completed. + + :param context: The 'context.RequestContext' object for the request. + :param source_share: Reference to the original share model. + :param destination_share: Reference to the share model to be used by + migrated share. + :param share_server: Share server model or None. + :param destination_share_server: Destination Share server model or + None. + :return: Boolean value to indicate if 1st phase is finished. + """ + raise NotImplementedError() + + def migration_complete( + self, context, source_share, destination_share, + share_server=None, destination_share_server=None): + """Completes migration of a given share to another host. + + .. 
note:: + Is called in source share's backend to complete migration. + + If driver is implementing 2-phase migration, this method should + perform the disruptive tasks related to the 2nd phase of migration, + thus completing it. Driver should also delete all original share data + from source backend. + + :param context: The 'context.RequestContext' object for the request. + :param source_share: Reference to the original share model. + :param destination_share: Reference to the share model to be used by + migrated share. + :param share_server: Share server model or None. + :param destination_share_server: Destination Share server model or + None. + :return: List of export locations to update the share with. + """ + raise NotImplementedError() + + def migration_cancel( + self, context, source_share, destination_share, + share_server=None, destination_share_server=None): + """Cancels migration of a given share to another host. + + .. note:: + Is called in source share's backend to cancel migration. + + If possible, driver can implement a way to cancel an in-progress + migration. + + :param context: The 'context.RequestContext' object for the request. + :param source_share: Reference to the original share model. + :param destination_share: Reference to the share model to be used by + migrated share. + :param share_server: Share server model or None. + :param destination_share_server: Destination Share server model or + None. + """ + raise NotImplementedError() + + def migration_get_progress( + self, context, source_share, destination_share, + share_server=None, destination_share_server=None): + """Obtains progress of migration of a given share to another host. + + .. note:: + Is called in source share's backend to obtain migration progress. If possible, driver can implement a way to return migration progress information. :param context: The 'context.RequestContext' object for the request. - :param share_ref: Reference to the share being migrated. 
+ :param source_share: Reference to the original share model. + :param destination_share: Reference to the share model to be used by + migrated share. :param share_server: Share server model or None. - :param dest_driver_migration_info: Migration information provided by - destination host. - :return: A dictionary with 'total_progress' field containing the - percentage value. + :param destination_share_server: Destination Share server model or + None. + :return: A dictionary with at least 'total_progress' field containing + the percentage value. """ raise NotImplementedError() - def migration_get_driver_info(self, context, share, share_server): - """Is called to provide necessary driver migration logic. - - :param context: The 'context.RequestContext' object for the request. - :param share: Reference to the share being migrated. - :param share_server: Share server model or None. - :return: A dictionary with migration information. - """ - return None - - def migration_get_info(self, context, share, share_server): + def migration_get_info(self, context, share, share_server=None): """Is called to provide necessary generic migration logic. :param context: The 'context.RequestContext' object for the request. @@ -411,7 +481,7 @@ class ShareDriver(object): return {'mount': mount_template, 'unmount': unmount_template} - def _get_mount_command(self, context, share_instance, share_server): + def _get_mount_command(self, context, share_instance, share_server=None): """Is called to delegate mounting share logic.""" mount_template = self.configuration.safe_get('share_mount_template') @@ -424,7 +494,7 @@ class ShareDriver(object): return mount_template % format_template - def _get_mount_export(self, share_instance, share_server): + def _get_mount_export(self, share_instance, share_server=None): # NOTE(ganso): If drivers want to override the export_location IP, # they can do so using this configuration. This method can also be # overridden if necessary. 
@@ -434,7 +504,8 @@ class ShareDriver(object): path = share_instance['export_locations'][0]['path'] return path - def _get_unmount_command(self, context, share_instance, share_server): + def _get_unmount_command(self, context, share_instance, + share_server=None): return self.configuration.safe_get('share_unmount_template') def create_share(self, context, share, share_server=None): diff --git a/manila/share/manager.py b/manila/share/manager.py index d1e350191c..22cf9a5695 100644 --- a/manila/share/manager.py +++ b/manila/share/manager.py @@ -22,6 +22,7 @@ import copy import datetime import functools +import time from oslo_config import cfg from oslo_log import log @@ -44,6 +45,7 @@ from manila.i18n import _LW from manila import manager from manila import quota from manila.share import access +from manila.share import api import manila.share.configuration from manila.share import drivers_private_data from manila.share import migration @@ -182,7 +184,7 @@ def add_hooks(f): class ShareManager(manager.SchedulerDependentManager): """Manages NAS storages.""" - RPC_API_VERSION = '1.11' + RPC_API_VERSION = '1.12' def __init__(self, share_driver=None, service_name=None, *args, **kwargs): """Load the driver from args, or from flags.""" @@ -284,6 +286,14 @@ class ShareManager(manager.SchedulerDependentManager): LOG.debug("Re-exporting %s shares", len(share_instances)) for share_instance in share_instances: share_ref = self.db.share_get(ctxt, share_instance['share_id']) + + if (share_ref['task_state'] == ( + constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) and + share_instance['status'] == constants.STATUS_MIGRATING): + rpcapi = share_rpcapi.ShareAPI() + rpcapi.migration_driver_recovery(ctxt, share_ref, self.host) + continue + if share_ref.is_busy: LOG.info( _LI("Share instance %(id)s: skipping export, " @@ -343,7 +353,8 @@ class ShareManager(manager.SchedulerDependentManager): def _provide_share_server_for_share(self, context, share_network_id, share_instance, 
snapshot=None, - consistency_group=None): + consistency_group=None, + create_on_backend=True): """Gets or creates share_server and updates share with its id. Active share_server can be deleted if there are no dependent shares @@ -362,6 +373,9 @@ class ShareManager(manager.SchedulerDependentManager): share_network_id from provided snapshot. :param share_instance: Share Instance model :param snapshot: Optional -- Snapshot model + :param create_on_backend: Boolean. If True, driver will be asked to + create the share server if no share server + is available. :returns: dict, dict -- first value is share_server, that has been chosen for share schedule. Second value is @@ -461,20 +475,74 @@ class ShareManager(manager.SchedulerDependentManager): {'share_server_id': compatible_share_server['id']}, with_share_data=True ) + if create_on_backend: + compatible_share_server = ( + self._create_share_server_in_backend( + context, compatible_share_server)) - if compatible_share_server['status'] == constants.STATUS_CREATING: - # Create share server on backend with data from db. - compatible_share_server = self._setup_server( - context, compatible_share_server) - LOG.info(_LI("Share server created successfully.")) - else: - LOG.info(_LI("Used preexisting share server " - "'%(share_server_id)s'"), - {'share_server_id': compatible_share_server['id']}) return compatible_share_server, share_instance_ref return _provide_share_server_for_share() + def _create_share_server_in_backend(self, context, share_server): + + if share_server['status'] == constants.STATUS_CREATING: + # Create share server on backend with data from db. 
+ share_server = self._setup_server(context, share_server) + LOG.info(_LI("Share server created successfully.")) + else: + LOG.info(_LI("Using preexisting share server: " + "'%(share_server_id)s'"), + {'share_server_id': share_server['id']}) + return share_server + + def create_share_server(self, context, share_server_id): + """Invoked to create a share server in this backend. + + This method is invoked to create the share server defined in the model + obtained by the supplied id. + + :param context: The 'context.RequestContext' object for the request. + :param share_server_id: The id of the server to be created. + """ + share_server = self.db.share_server_get(context, share_server_id) + + self._create_share_server_in_backend(context, share_server) + + def provide_share_server(self, context, share_instance_id, + share_network_id, snapshot_id=None): + """Invoked to provide a compatible share server. + + This method is invoked to find a compatible share server among the + existing ones or create a share server database instance with the share + server properties that will be used to create the share server later. + + :param context: The 'context.RequestContext' object for the request. + :param share_instance_id: The id of the share instance whose model + attributes will be used to provide the share server. + :param share_network_id: The id of the share network the share server + to be provided has to be related to. + :param snapshot_id: The id of the snapshot to be used to obtain the + share server if applicable. + :return: The id of the share server that is being provided. 
+ """ + share_instance = self.db.share_instance_get(context, share_instance_id, + with_share_data=True) + snapshot_ref = None + if snapshot_id: + snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) + + consistency_group_ref = None + if share_instance.get('consistency_group_id'): + consistency_group_ref = self.db.consistency_group_get( + context, share_instance['consistency_group_id']) + + share_server, share_instance = self._provide_share_server_for_share( + context, share_network_id, share_instance, snapshot_ref, + consistency_group_ref, create_on_backend=False) + + return share_server['id'] + def _provide_share_server_for_cg(self, context, share_network_id, cg_ref, cgsnapshot=None): """Gets or creates share_server and updates share with its id. @@ -592,21 +660,187 @@ class ShareManager(manager.SchedulerDependentManager): return self.driver.migration_get_info(context, share_instance, share_server) - @utils.require_driver_initialized - def migration_get_driver_info(self, context, share_instance_id): - share_instance = self.db.share_instance_get( - context, share_instance_id, with_share_data=True) + def _migration_start_driver(self, context, share_ref, src_share_instance, + dest_host, notify, new_az_id): - share_server = None - if share_instance.get('share_server_id'): - share_server = self.db.share_server_get( - context, share_instance['share_server_id']) + share_server = self._get_share_server(context, src_share_instance) - return self.driver.migration_get_driver_info(context, share_instance, - share_server) + share_api = api.API() + + request_spec, dest_share_instance = ( + share_api.create_share_instance_and_get_request_spec( + context, share_ref, new_az_id, None, dest_host, + src_share_instance['share_network_id'])) + + self.db.share_instance_update( + context, dest_share_instance['id'], + {'status': constants.STATUS_MIGRATING_TO}) + + # refresh and obtain proxified properties + dest_share_instance = self.db.share_instance_get( + context, 
dest_share_instance['id'], with_share_data=True) + + helper = migration.ShareMigrationHelper(context, self.db, share_ref) + + try: + if dest_share_instance['share_network_id']: + rpcapi = share_rpcapi.ShareAPI() + + # NOTE(ganso): Obtaining the share_server_id asynchronously so + # we can wait for it to be ready. + dest_share_server_id = rpcapi.provide_share_server( + context, dest_share_instance, + dest_share_instance['share_network_id']) + + rpcapi.create_share_server( + context, dest_share_instance, dest_share_server_id) + + dest_share_server = helper.wait_for_share_server( + dest_share_server_id) + else: + dest_share_server = None + + compatibility = self.driver.migration_check_compatibility( + context, src_share_instance, dest_share_instance, + share_server, dest_share_server) + + if not compatibility.get('compatible'): + msg = _("Destination host %(host)s is not compatible with " + "share %(share)s's source backend for driver-assisted " + "migration.") % { + 'host': dest_host, + 'share': share_ref['id'], + } + raise exception.ShareMigrationFailed(reason=msg) + + if not compatibility.get('writable'): + readonly_support = self.driver.configuration.safe_get( + 'migration_readonly_rules_support') + + helper.change_to_read_only(src_share_instance, share_server, + readonly_support, self.driver) + + LOG.debug("Initiating driver migration for share %s.", + share_ref['id']) + + self.db.share_update( + context, share_ref['id'], + {'task_state': ( + constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)}) + + self.driver.migration_start( + context, src_share_instance, dest_share_instance, + share_server, dest_share_server) + + # prevent invoking _migration_driver_continue immediately + time.sleep(5) + + self._migration_driver_continue( + context, share_ref, src_share_instance, dest_share_instance, + share_server, dest_share_server, notify) + + except Exception: + # NOTE(ganso): Cleaning up error'ed destination share instance from + # database. 
It is assumed that driver cleans up leftovers in + # backend when migration fails. + self._migration_delete_instance(context, dest_share_instance['id']) + + # NOTE(ganso): For now source share instance should remain in + # migrating status for fallback migration. + msg = _("Driver optimized migration of share %s " + "failed.") % share_ref['id'] + LOG.exception(msg) + raise exception.ShareMigrationFailed(reason=msg) + + return True + + def _migration_driver_continue( + self, context, share_ref, src_share_instance, dest_share_instance, + src_share_server, dest_share_server, notify=False): + + finished = False + share_ref = self.db.share_get(context, share_ref['id']) + + while (not finished and share_ref['task_state'] == + constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): + finished = self.driver.migration_continue( + context, src_share_instance, dest_share_instance, + src_share_server, dest_share_server) + time.sleep(5) + share_ref = self.db.share_get(context, share_ref['id']) + + if finished: + self.db.share_update( + context, share_ref['id'], + {'task_state': + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE}) + + if notify: + self._migration_complete_driver( + context, share_ref, src_share_instance, + dest_share_instance) + + LOG.info(_LI("Share Migration for share %s" + " completed successfully."), share_ref['id']) + else: + LOG.info(_LI("Share Migration for share %s completed " + "first phase successfully."), share_ref['id']) + else: + if (share_ref['task_state'] == + constants.TASK_STATE_MIGRATION_CANCELLED): + LOG.warning(_LW("Share Migration for share %s was cancelled."), + share_ref['id']) + else: + msg = (_("Share Migration for share %s did not complete " + "first phase successfully."), share_ref['id']) + raise exception.ShareMigrationFailed(reason=msg) @utils.require_driver_initialized - def migration_start(self, context, share_id, host, force_host_copy, + def migration_driver_recovery(self, context, share_id): + """Resumes a migration after a service 
restart.""" + + share = self.db.share_get(context, share_id) + + share_api = api.API() + + src_share_instance_id, dest_share_instance_id = ( + share_api.get_migrating_instances(share)) + + src_share_instance = self.db.share_instance_get( + context, src_share_instance_id, with_share_data=True) + + dest_share_instance = self.db.share_instance_get( + context, dest_share_instance_id, with_share_data=True) + + src_share_server = self._get_share_server(context, src_share_instance) + + dest_share_server = self._get_share_server( + context, dest_share_instance) + + try: + + self._migration_driver_continue( + context, share, src_share_instance, dest_share_instance, + src_share_server, dest_share_server) + + except Exception: + # NOTE(ganso): Cleaning up error'ed destination share instance from + # database. It is assumed that driver cleans up leftovers in + # backend when migration fails. + self._migration_delete_instance(context, dest_share_instance['id']) + self.db.share_instance_update( + context, src_share_instance['id'], + {'status': constants.STATUS_AVAILABLE}) + self.db.share_update( + context, share['id'], + {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) + msg = _("Driver optimized migration of share %s " + "failed.") % share['id'] + LOG.exception(msg) + raise exception.ShareMigrationFailed(reason=msg) + + @utils.require_driver_initialized + def migration_start(self, context, share_id, dest_host, force_host_copy, notify=True): """Migrates a share from current host to another host.""" LOG.debug("Entered migration_start method for share %s.", share_id) @@ -615,10 +849,14 @@ class ShareManager(manager.SchedulerDependentManager): context, share_id, {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}) - rpcapi = share_rpcapi.ShareAPI() share_ref = self.db.share_get(context, share_id) share_instance = self._get_share_instance(context, share_ref) - moved = False + success = False + + host_value = share_utils.extract_host(dest_host) + service = 
self.db.service_get_by_args( + context, host_value, 'manila-share') + new_az_id = service['availability_zone_id'] self.db.share_instance_update(context, share_instance['id'], {'status': constants.STATUS_MIGRATING}) @@ -626,49 +864,27 @@ class ShareManager(manager.SchedulerDependentManager): if not force_host_copy: try: - dest_driver_migration_info = rpcapi.migration_get_driver_info( - context, share_instance) - - share_server = self._get_share_server(context.elevated(), - share_instance) - - LOG.debug("Calling driver migration for share %s.", share_id) - - self.db.share_update( - context, share_id, - {'task_state': ( - constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)}) - - moved, model_update = self.driver.migration_start( - context, share_instance, share_server, host, - dest_driver_migration_info, notify) - - # NOTE(ganso): Here we are allowing the driver to perform - # changes even if it has not performed migration. While this - # scenario may not be valid, I do not think it should be - # forcefully prevented. - - if model_update: - self.db.share_instance_update( - context, share_instance['id'], model_update) + success = self._migration_start_driver( + context, share_ref, share_instance, dest_host, notify, + new_az_id) except Exception as e: - msg = six.text_type(e) - LOG.exception(msg) - LOG.warning(_LW("Driver did not migrate share %s. 
Proceeding " - "with generic migration approach.") % share_id) + if not isinstance(e, NotImplementedError): + LOG.exception( + _LE("The driver could not migrate the share %(shr)s"), + {'shr': share_id}) - if not moved: - LOG.debug("Starting generic migration " - "for share %s.", share_id) + if not success: + LOG.info(_LI("Starting generic migration for share %s."), share_id) self.db.share_update( context, share_id, {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}) try: - self._migration_start_generic(context, share_ref, - share_instance, host, notify) + self._migration_start_generic( + context, share_ref, share_instance, dest_host, notify, + new_az_id) except Exception: msg = _("Generic migration failed for share %s.") % share_id LOG.exception(msg) @@ -679,52 +895,36 @@ class ShareManager(manager.SchedulerDependentManager): context, share_instance['id'], {'status': constants.STATUS_AVAILABLE}) raise exception.ShareMigrationFailed(reason=msg) - elif not notify: - self.db.share_update( - context, share_ref['id'], - {'task_state': - constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE}) - else: - self.db.share_instance_update( - context, share_instance['id'], - {'status': constants.STATUS_AVAILABLE, - 'host': host['host']}) - self.db.share_update( - context, share_ref['id'], - {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}) - LOG.info(_LI("Share Migration for share %s" - " completed successfully."), share_ref['id']) - - def _migration_start_generic(self, context, share, share_instance, host, - notify): + def _migration_start_generic(self, context, share, src_share_instance, + dest_host, notify, new_az_id): rpcapi = share_rpcapi.ShareAPI() helper = migration.ShareMigrationHelper(context, self.db, share) share_server = self._get_share_server(context.elevated(), - share_instance) + src_share_instance) readonly_support = self.driver.configuration.safe_get( 'migration_readonly_rules_support') - helper.change_to_read_only(share_instance, share_server, + 
helper.change_to_read_only(src_share_instance, share_server, readonly_support, self.driver) try: - new_share_instance = helper.create_instance_and_wait( - share, share_instance, host) + dest_share_instance = helper.create_instance_and_wait( + share, src_share_instance, dest_host, new_az_id) self.db.share_instance_update( - context, new_share_instance['id'], + context, dest_share_instance['id'], {'status': constants.STATUS_MIGRATING_TO}) except Exception: msg = _("Failed to create instance on destination " "backend during migration of share %s.") % share['id'] LOG.exception(msg) - helper.cleanup_access_rules(share_instance, share_server, + helper.cleanup_access_rules(src_share_instance, share_server, self.driver) raise exception.ShareMigrationFailed(reason=msg) @@ -735,17 +935,17 @@ class ShareManager(manager.SchedulerDependentManager): try: src_migration_info = self.driver.migration_get_info( - context, share_instance, share_server) + context, src_share_instance, share_server) dest_migration_info = rpcapi.migration_get_info( - context, new_share_instance) + context, dest_share_instance) LOG.debug("Time to start copying in migration" " for share %s.", share['id']) data_rpc.migration_start( - context, share['id'], ignore_list, share_instance['id'], - new_share_instance['id'], src_migration_info, + context, share['id'], ignore_list, src_share_instance['id'], + dest_share_instance['id'], src_migration_info, dest_migration_info, notify) except Exception: @@ -753,77 +953,128 @@ class ShareManager(manager.SchedulerDependentManager): " invoking Data Service for migration of " "share %s.") % share['id'] LOG.exception(msg) - helper.cleanup_new_instance(new_share_instance) - helper.cleanup_access_rules(share_instance, share_server, + helper.cleanup_new_instance(dest_share_instance) + helper.cleanup_access_rules(src_share_instance, share_server, self.driver) raise exception.ShareMigrationFailed(reason=msg) + def _migration_complete_driver( + self, context, share_ref, 
src_share_instance, dest_share_instance): + + share_server = self._get_share_server(context, src_share_instance) + dest_share_server = self._get_share_server( + context, dest_share_instance) + + export_locations = self.driver.migration_complete( + context, src_share_instance, dest_share_instance, + share_server, dest_share_server) + + if export_locations: + self.db.share_export_locations_update( + context, dest_share_instance['id'], export_locations) + + helper = migration.ShareMigrationHelper(context, self.db, share_ref) + + helper.apply_new_access_rules(dest_share_instance) + + self.db.share_instance_update( + context, dest_share_instance['id'], + {'status': constants.STATUS_AVAILABLE}) + + self._migration_delete_instance(context, src_share_instance['id']) + + self.db.share_update( + context, dest_share_instance['share_id'], + {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}) + + def _migration_delete_instance(self, context, instance_id): + + share_instance = self.db.share_instance_get( + context, instance_id, with_share_data=True) + + self.db.share_instance_update( + context, instance_id, {'status': constants.STATUS_INACTIVE}) + + rules = self.db.share_access_get_all_for_instance( + context, instance_id) + + for rule in rules: + access_mapping = self.db.share_instance_access_get( + context, rule['id'], instance_id) + + self.db.share_instance_access_delete( + context, access_mapping['id']) + + self.db.share_instance_delete(context, instance_id) + LOG.info(_LI("Share instance %s: deleted successfully."), + instance_id) + + self._check_delete_share_server(context, share_instance) + @utils.require_driver_initialized - def migration_complete(self, context, share_id, share_instance_id, - new_share_instance_id): + def migration_complete(self, context, src_instance_id, dest_instance_id): + + src_share_instance = self.db.share_instance_get( + context, src_instance_id, with_share_data=True) + dest_share_instance = self.db.share_instance_get( + context, 
dest_instance_id, with_share_data=True) + + share_ref = self.db.share_get(context, src_share_instance['share_id']) LOG.info(_LI("Received request to finish Share Migration for " - "share %s."), share_id) - - share_ref = self.db.share_get(context, share_id) + "share %s."), share_ref['id']) if share_ref['task_state'] == ( constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE): - rpcapi = share_rpcapi.ShareAPI() - - share_instance = self._get_share_instance(context, share_ref) - - share_server = self._get_share_server(context, share_instance) - try: - dest_driver_migration_info = rpcapi.migration_get_driver_info( - context, share_instance) + self._migration_complete_driver( + context, share_ref, src_share_instance, + dest_share_instance) - model_update = self.driver.migration_complete( - context, share_instance, share_server, - dest_driver_migration_info) - if model_update: - self.db.share_instance_update( - context, share_instance['id'], model_update) - self.db.share_update( - context, share_id, - {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}) except Exception: msg = _("Driver migration completion failed for" - " share %s.") % share_id + " share %s.") % share_ref['id'] LOG.exception(msg) + self.db.share_instance_update( + context, src_instance_id, + {'status': constants.STATUS_AVAILABLE}) + self.db.share_instance_update( + context, dest_instance_id, + {'status': constants.STATUS_ERROR}) self.db.share_update( - context, share_id, + context, share_ref['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) raise exception.ShareMigrationFailed(reason=msg) - else: try: - self._migration_complete( - context, share_ref, share_instance_id, - new_share_instance_id) + self._migration_complete_generic( + context, share_ref, src_instance_id, + dest_instance_id) except Exception: msg = _("Generic migration completion failed for" - " share %s.") % share_id + " share %s.") % share_ref['id'] LOG.exception(msg) self.db.share_update( - context, share_id, + context, 
share_ref['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) self.db.share_instance_update( - context, share_instance_id, + context, src_instance_id, {'status': constants.STATUS_AVAILABLE}) raise exception.ShareMigrationFailed(reason=msg) - def _migration_complete(self, context, share_ref, share_instance_id, - new_share_instance_id): + LOG.info(_LI("Share Migration for share %s" + " completed successfully."), share_ref['id']) - share_instance = self.db.share_instance_get( - context, share_instance_id, with_share_data=True) - new_share_instance = self.db.share_instance_get( - context, new_share_instance_id, with_share_data=True) + def _migration_complete_generic(self, context, share_ref, + src_instance_id, dest_instance_id): - share_server = self._get_share_server(context, share_instance) + src_share_instance = self.db.share_instance_get( + context, src_instance_id, with_share_data=True) + dest_share_instance = self.db.share_instance_get( + context, dest_instance_id, with_share_data=True) + + share_server = self._get_share_server(context, src_share_instance) helper = migration.ShareMigrationHelper(context, self.db, share_ref) @@ -833,13 +1084,13 @@ class ShareManager(manager.SchedulerDependentManager): msg = _("Data copy of generic migration for share %s has not " "completed successfully.") % share_ref['id'] LOG.warning(msg) - helper.cleanup_new_instance(new_share_instance) + helper.cleanup_new_instance(dest_share_instance) - helper.cleanup_access_rules(share_instance, share_server, + helper.cleanup_access_rules(src_share_instance, share_server, self.driver) if task_state == constants.TASK_STATE_DATA_COPYING_CANCELLED: self.db.share_instance_update( - context, share_instance_id, + context, src_instance_id, {'status': constants.STATUS_AVAILABLE}) self.db.share_update( context, share_ref['id'], @@ -858,13 +1109,13 @@ class ShareManager(manager.SchedulerDependentManager): raise exception.ShareMigrationFailed(reason=msg) try: - 
helper.apply_new_access_rules(new_share_instance) + helper.apply_new_access_rules(dest_share_instance) except Exception: msg = _("Failed to apply new access rules during migration " "of share %s.") % share_ref['id'] LOG.exception(msg) - helper.cleanup_new_instance(new_share_instance) - helper.cleanup_access_rules(share_instance, share_server, + helper.cleanup_new_instance(dest_share_instance) + helper.cleanup_access_rules(src_share_instance, share_server, self.driver) raise exception.ShareMigrationFailed(reason=msg) @@ -872,75 +1123,107 @@ class ShareManager(manager.SchedulerDependentManager): context, share_ref['id'], {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}) - self.db.share_instance_update(context, new_share_instance_id, + self.db.share_instance_update(context, dest_instance_id, {'status': constants.STATUS_AVAILABLE}) - self.db.share_instance_update(context, share_instance_id, + self.db.share_instance_update(context, src_instance_id, {'status': constants.STATUS_INACTIVE}) - helper.delete_instance_and_wait(share_instance) + helper.delete_instance_and_wait(src_share_instance) + + self._check_delete_share_server(context, src_share_instance) self.db.share_update( context, share_ref['id'], {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}) - LOG.info(_LI("Share Migration for share %s" - " completed successfully."), share_ref['id']) - @utils.require_driver_initialized - def migration_cancel(self, context, share_id): + def migration_cancel(self, context, src_instance_id, dest_instance_id): - share_ref = self.db.share_get(context, share_id) + src_share_instance = self.db.share_instance_get( + context, src_instance_id, with_share_data=True) + dest_share_instance = self.db.share_instance_get( + context, dest_instance_id, with_share_data=True) - # Confirm that it is driver migration scenario - if share_ref['task_state'] == ( + share_ref = self.db.share_get(context, src_share_instance['share_id']) + + if share_ref['task_state'] not in ( + 
constants.TASK_STATE_DATA_COPYING_COMPLETED, + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): + msg = _("Migration of share %s cannot be cancelled at this " + "moment.") % share_ref['id'] + raise exception.InvalidShare(reason=msg) - share_server = None - if share_ref.instance.get('share_server_id'): - share_server = self.db.share_server_get( - context, share_ref.instance['share_server_id']) + share_server = self._get_share_server(context, src_share_instance) - share_rpc = share_rpcapi.ShareAPI() + dest_share_server = self._get_share_server( + context, dest_share_instance) - driver_migration_info = share_rpc.migration_get_driver_info( - context, share_ref.instance) + if share_ref['task_state'] == ( + constants.TASK_STATE_DATA_COPYING_COMPLETED): + helper = migration.ShareMigrationHelper( + context, self.db, share_ref) + + self.db.share_instance_update( + context, dest_share_instance['id'], + {'status': constants.STATUS_INACTIVE}) + + helper.cleanup_new_instance(dest_share_instance) + + helper.cleanup_access_rules(src_share_instance, share_server, + self.driver) + else: self.driver.migration_cancel( - context, share_ref.instance, share_server, - driver_migration_info) - else: - msg = _("Driver is not performing migration for" - " share %s") % share_id - raise exception.InvalidShare(reason=msg) + context, src_share_instance, dest_share_instance, + share_server, dest_share_server) + + self._migration_delete_instance(context, dest_share_instance['id']) + + self.db.share_update( + context, share_ref['id'], + {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED}) + + self.db.share_instance_update( + context, src_share_instance['id'], + {'status': constants.STATUS_AVAILABLE}) + + LOG.info(_LI("Share Migration for share %s" + " was cancelled."), share_ref['id']) @utils.require_driver_initialized - def migration_get_progress(self, context, share_id): + def migration_get_progress(self, context, src_instance_id, + 
dest_instance_id): - share_ref = self.db.share_get(context, share_id) + src_share_instance = self.db.share_instance_get( + context, src_instance_id, with_share_data=True) + dest_share_instance = self.db.share_instance_get( + context, dest_instance_id, with_share_data=True) + + share_ref = self.db.share_get(context, src_share_instance['share_id']) # Confirm that it is driver migration scenario - if share_ref['task_state'] == ( + if share_ref['task_state'] != ( constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): - - share_server = None - if share_ref.instance.get('share_server_id'): - share_server = self.db.share_server_get( - context, share_ref.instance['share_server_id']) - - share_rpc = share_rpcapi.ShareAPI() - - driver_migration_info = share_rpc.migration_get_driver_info( - context, share_ref.instance) - - return self.driver.migration_get_progress( - context, share_ref.instance, share_server, - driver_migration_info) - else: msg = _("Driver is not performing migration for" - " share %s") % share_id + " share %s at this moment.") % share_ref['id'] raise exception.InvalidShare(reason=msg) + share_server = None + if share_ref.instance.get('share_server_id'): + share_server = self.db.share_server_get( + context, src_share_instance['share_server_id']) + + dest_share_server = None + if dest_share_instance.get('share_server_id'): + dest_share_server = self.db.share_server_get( + context, dest_share_instance['share_server_id']) + + return self.driver.migration_get_progress( + context, src_share_instance, dest_share_instance, + share_server, dest_share_server) + def _get_share_instance(self, context, share): if isinstance(share, six.string_types): id = share @@ -1879,6 +2162,10 @@ class ShareManager(manager.SchedulerDependentManager): LOG.info(_LI("Share instance %s: deleted successfully."), share_instance_id) + self._check_delete_share_server(context, share_instance) + + def _check_delete_share_server(self, context, share_instance): + if 
CONF.delete_share_server_with_last_share: share_server = self._get_share_server(context, share_instance) if share_server and len(share_server.share_instances) == 0: diff --git a/manila/share/migration.py b/manila/share/migration.py index bfb0fa1b0f..16b315484c 100644 --- a/manila/share/migration.py +++ b/manila/share/migration.py @@ -84,11 +84,12 @@ class ShareMigrationHelper(object): else: time.sleep(tries ** 2) - def create_instance_and_wait(self, share, share_instance, host): + def create_instance_and_wait( + self, share, share_instance, dest_host, new_az_id): new_share_instance = self.api.create_instance( self.context, share, share_instance['share_network_id'], - host['host']) + dest_host, new_az_id) # Wait for new_share_instance to become ready starttime = time.time() @@ -103,14 +104,14 @@ class ShareMigrationHelper(object): msg = _("Failed to create new share instance" " (from %(share_id)s) on " "destination host %(host_name)s") % { - 'share_id': share['id'], 'host_name': host['host']} + 'share_id': share['id'], 'host_name': dest_host} self.cleanup_new_instance(new_share_instance) raise exception.ShareMigrationFailed(reason=msg) elif now > deadline: msg = _("Timeout creating new share instance " "(from %(share_id)s) on " "destination host %(host_name)s") % { - 'share_id': share['id'], 'host_name': host['host']} + 'share_id': share['id'], 'host_name': dest_host} self.cleanup_new_instance(new_share_instance) raise exception.ShareMigrationFailed(reason=msg) else: @@ -199,3 +200,16 @@ class ShareMigrationHelper(object): utils.wait_for_access_update( self.context, self.db, new_share_instance, self.migration_wait_access_rules_timeout) + + @utils.retry(exception.ShareServerNotReady, retries=8) + def wait_for_share_server(self, share_server_id): + share_server = self.db.share_server_get(self.context, share_server_id) + if share_server['status'] == constants.STATUS_ERROR: + raise exception.ShareServerNotCreated( + share_server_id=share_server_id) + elif 
share_server['status'] == constants.STATUS_ACTIVE: + return share_server + else: + raise exception.ShareServerNotReady( + share_server_id=share_server_id, time=511, + state=constants.STATUS_AVAILABLE) diff --git a/manila/share/rpcapi.py b/manila/share/rpcapi.py index 7e33cc0e3c..258da75d7b 100644 --- a/manila/share/rpcapi.py +++ b/manila/share/rpcapi.py @@ -59,6 +59,10 @@ class ShareAPI(object): migration_get_driver_info() 1.11 - Add create_replicated_snapshot() and delete_replicated_snapshot() methods + 1.12 - Add provide_share_server(), create_share_server() and + migration_driver_recovery(), remove migration_get_driver_info(), + update migration_cancel(), migration_complete() and + migration_get_progress method signature """ BASE_RPC_API_VERSION = '1.0' @@ -67,7 +71,7 @@ class ShareAPI(object): super(ShareAPI, self).__init__() target = messaging.Target(topic=CONF.share_topic, version=self.BASE_RPC_API_VERSION) - self.client = rpc.get_client(target, version_cap='1.11') + self.client = rpc.get_client(target, version_cap='1.12') def create_share_instance(self, context, share_instance, host, request_spec, filter_properties, @@ -123,15 +127,19 @@ class ShareAPI(object): notify): new_host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=new_host, version='1.6') - host_p = {'host': dest_host.host, - 'capabilities': dest_host.capabilities} call_context.cast(context, 'migration_start', share_id=share['id'], - host=host_p, + dest_host=dest_host, force_host_copy=force_host_copy, notify=notify) + def migration_driver_recovery(self, context, share, host): + call_context = self.client.prepare(server=host, version='1.12') + call_context.cast(context, + 'migration_driver_recovery', + share_id=share['id']) + def migration_get_info(self, context, share_instance): new_host = utils.extract_host(share_instance['host']) call_context = self.client.prepare(server=new_host, version='1.6') @@ -139,13 +147,6 @@ class ShareAPI(object): 
'migration_get_info', share_instance_id=share_instance['id']) - def migration_get_driver_info(self, context, share_instance): - new_host = utils.extract_host(share_instance['host']) - call_context = self.client.prepare(server=new_host, version='1.6') - return call_context.call(context, - 'migration_get_driver_info', - share_instance_id=share_instance['id']) - def delete_share_server(self, context, share_server): host = utils.extract_host(share_server['host']) call_context = self.client.prepare(server=host, version='1.0') @@ -296,24 +297,45 @@ class ShareAPI(object): share_replica_id=share_replica['id'], share_id=share_replica['share_id']) - def migration_complete(self, context, share, share_instance_id, - new_share_instance_id): - new_host = utils.extract_host(share['host']) - call_context = self.client.prepare(server=new_host, version='1.10') + def migration_complete(self, context, src_share_instance, + dest_instance_id): + new_host = utils.extract_host(src_share_instance['host']) + call_context = self.client.prepare(server=new_host, version='1.12') call_context.cast(context, 'migration_complete', - share_id=share['id'], - share_instance_id=share_instance_id, - new_share_instance_id=new_share_instance_id) + src_instance_id=src_share_instance['id'], + dest_instance_id=dest_instance_id) - def migration_cancel(self, context, share): - new_host = utils.extract_host(share['host']) - call_context = self.client.prepare(server=new_host, version='1.10') - call_context.call(context, 'migration_cancel', share_id=share['id']) + def migration_cancel(self, context, src_share_instance, dest_instance_id): + new_host = utils.extract_host(src_share_instance['host']) + call_context = self.client.prepare(server=new_host, version='1.12') + call_context.cast(context, + 'migration_cancel', + src_instance_id=src_share_instance['id'], + dest_instance_id=dest_instance_id) - def migration_get_progress(self, context, share): - new_host = utils.extract_host(share['host']) - call_context = 
self.client.prepare(server=new_host, version='1.10') + def migration_get_progress(self, context, src_share_instance, + dest_instance_id): + new_host = utils.extract_host(src_share_instance['host']) + call_context = self.client.prepare(server=new_host, version='1.12') return call_context.call(context, 'migration_get_progress', - share_id=share['id']) + src_instance_id=src_share_instance['id'], + dest_instance_id=dest_instance_id) + + def provide_share_server(self, context, share_instance, share_network_id, + snapshot_id=None): + new_host = utils.extract_host(share_instance['host']) + call_context = self.client.prepare(server=new_host, version='1.12') + return call_context.call(context, + 'provide_share_server', + share_instance_id=share_instance['id'], + share_network_id=share_network_id, + snapshot_id=snapshot_id) + + def create_share_server(self, context, share_instance, share_server_id): + new_host = utils.extract_host(share_instance['host']) + call_context = self.client.prepare(server=new_host, version='1.12') + call_context.cast(context, + 'create_share_server', + share_server_id=share_server_id) diff --git a/manila/tests/api/v2/test_shares.py b/manila/tests/api/v2/test_shares.py index 91194fc7ab..71a9629803 100644 --- a/manila/tests/api/v2/test_shares.py +++ b/manila/tests/api/v2/test_shares.py @@ -599,10 +599,7 @@ class ShareAPITest(test.TestCase): req.api_version_request.experimental = True body = {'migration_get_progress': None} - expected = {'total_progress': 'fake', - 'current_file_progress': 'fake', - 'current_file_path': 'fake', - } + expected = {'total_progress': 'fake'} self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) diff --git a/manila/tests/data/test_manager.py b/manila/tests/data/test_manager.py index af83b87ee5..824d83b725 100644 --- a/manila/tests/data/test_manager.py +++ b/manila/tests/data/test_manager.py @@ -82,6 +82,8 @@ class DataManagerTestCase(test.TestCase): # mocks self.mock_object(db, 'share_get', 
mock.Mock(return_value=self.share)) + self.mock_object(db, 'share_instance_get', mock.Mock( + return_value=self.share.instance)) self.mock_object(data_utils, 'Copy', mock.Mock(return_value='fake_copy')) @@ -122,7 +124,7 @@ class DataManagerTestCase(test.TestCase): if notify or exc: share_rpc.ShareAPI.migration_complete.assert_called_once_with( - self.context, self.share, 'ins1_id', 'ins2_id') + self.context, self.share.instance, 'ins2_id') @ddt.data({'cancelled': False, 'exc': None}, {'cancelled': False, 'exc': Exception('fake')}, diff --git a/manila/tests/scheduler/test_manager.py b/manila/tests/scheduler/test_manager.py index ce69981e7e..7bf214e07f 100644 --- a/manila/tests/scheduler/test_manager.py +++ b/manila/tests/scheduler/test_manager.py @@ -218,8 +218,11 @@ class SchedulerManagerTestCase(test.TestCase): def test_migrate_share_to_host(self): + class fake_host(object): + host = 'fake@backend#pool' + share = db_utils.create_share() - host = 'fake@backend#pool' + host = fake_host() self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.mock_object(share_rpcapi.ShareAPI, 'migration_start') @@ -227,8 +230,8 @@ class SchedulerManagerTestCase(test.TestCase): 'host_passes_filters', mock.Mock(return_value=host)) - self.manager.migrate_share_to_host(self.context, share['id'], host, - False, True, {}, None) + self.manager.migrate_share_to_host(self.context, share['id'], + host.host, False, True, {}, None) def test_migrate_share_to_host_no_valid_host(self): diff --git a/manila/tests/share/test_api.py b/manila/tests/share/test_api.py index 43b9fb8853..4f44486abe 100644 --- a/manila/tests/share/test_api.py +++ b/manila/tests/share/test_api.py @@ -33,7 +33,6 @@ from manila import policy from manila import quota from manila import share from manila.share import api as share_api -from manila.share import rpcapi as share_rpc from manila.share import share_types from manila import test from manila.tests import db_utils @@ -754,7 +753,7 @@ class 
ShareAPITestCase(test.TestCase): mock_db_share_instance_update = self.mock_object( db_api, 'share_instance_update') self.mock_object( - share_api.API, '_create_share_instance_and_get_request_spec', + share_api.API, 'create_share_instance_and_get_request_spec', mock.Mock(return_value=(fake_req_spec, fake_instance))) retval = self.api.create_instance(self.context, fake_share, @@ -2014,7 +2013,7 @@ class ShareAPITestCase(test.TestCase): def test_migration_start(self): host = 'fake2@backend#pool' - + fake_service = {'availability_zone_id': 'fake_az_id'} fake_type = { 'id': 'fake_type_id', 'extra_specs': { @@ -2027,17 +2026,21 @@ class ShareAPITestCase(test.TestCase): host='fake@backend#pool', share_type_id=fake_type['id']) request_spec = self._get_request_spec_dict( - share, fake_type, size=0) + share, fake_type, size=0, availability_zone_id='fake_az_id') self.mock_object(self.scheduler_rpcapi, 'migrate_share_to_host') self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=fake_type)) self.mock_object(utils, 'validate_service_host') + self.mock_object(db_api, 'service_get_by_args', + mock.Mock(return_value=fake_service)) self.api.migration_start(self.context, share, host, True, True) self.scheduler_rpcapi.migrate_share_to_host.assert_called_once_with( self.context, share['id'], host, True, True, request_spec) + db_api.service_get_by_args.assert_called_once_with( + self.context, 'fake2@backend', 'manila-share') def test_migration_start_status_unavailable(self): host = 'fake2@backend#pool' @@ -2111,6 +2114,7 @@ class ShareAPITestCase(test.TestCase): def test_migration_start_exception(self): host = 'fake2@backend#pool' + fake_service = {'availability_zone_id': 'fake_az_id'} fake_type = { 'id': 'fake_type_id', 'extra_specs': { @@ -2128,6 +2132,8 @@ class ShareAPITestCase(test.TestCase): self.mock_object(utils, 'validate_service_host') self.mock_object(db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=False)) + self.mock_object(db_api, 
'service_get_by_args', + mock.Mock(return_value=fake_service)) self.mock_object(db_api, 'share_update', mock.Mock(return_value=True)) self.mock_object(self.scheduler_rpcapi, 'migrate_share_to_host', mock.Mock(side_effect=exception.ShareMigrationFailed( @@ -2139,12 +2145,14 @@ class ShareAPITestCase(test.TestCase): db_api.share_update.assert_any_call( mock.ANY, share['id'], mock.ANY) + db_api.service_get_by_args.assert_called_once_with( + self.context, 'fake2@backend', 'manila-share') @ddt.data({}, {'replication_type': None}) def test_create_share_replica_invalid_share_type(self, attributes): share = fakes.fake_share(id='FAKE_SHARE_ID', **attributes) mock_request_spec_call = self.mock_object( - self.api, '_create_share_instance_and_get_request_spec') + self.api, 'create_share_instance_and_get_request_spec') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') mock_scheduler_rpcapi_call = self.mock_object( self.api.scheduler_rpcapi, 'create_share_replica') @@ -2163,7 +2171,7 @@ class ShareAPITestCase(test.TestCase): is_busy=True, replication_type='dr') mock_request_spec_call = self.mock_object( - self.api, '_create_share_instance_and_get_request_spec') + self.api, 'create_share_instance_and_get_request_spec') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') mock_scheduler_rpcapi_call = self.mock_object( self.api.scheduler_rpcapi, 'create_share_replica') @@ -2180,7 +2188,7 @@ class ShareAPITestCase(test.TestCase): share = fakes.fake_share( id='FAKE_SHARE_ID', replication_type='dr') mock_request_spec_call = self.mock_object( - self.api, '_create_share_instance_and_get_request_spec') + self.api, 'create_share_instance_and_get_request_spec') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') mock_scheduler_rpcapi_call = self.mock_object( self.api.scheduler_rpcapi, 'create_share_replica') @@ -2209,7 +2217,7 @@ class ShareAPITestCase(test.TestCase): self.mock_object(db_api, 
'share_replicas_get_available_active_replica', mock.Mock(return_value={'host': 'fake_ar_host'})) self.mock_object( - share_api.API, '_create_share_instance_and_get_request_spec', + share_api.API, 'create_share_instance_and_get_request_spec', mock.Mock(return_value=(fake_request_spec, fake_replica))) self.mock_object(db_api, 'share_replica_update') mock_sched_rpcapi_call = self.mock_object( @@ -2390,18 +2398,26 @@ class ShareAPITestCase(test.TestCase): task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED, instances=[instance1, instance2]) - self.mock_object(share_rpc.ShareAPI, 'migration_complete') + self.mock_object(db_api, 'share_instance_get', + mock.Mock(return_value=instance1)) + self.mock_object(self.api.share_rpcapi, 'migration_complete') self.api.migration_complete(self.context, share) - share_rpc.ShareAPI.migration_complete.assert_called_once_with( - self.context, share, instance1['id'], instance2['id']) + self.api.share_rpcapi.migration_complete.assert_called_once_with( + self.context, instance1, instance2['id']) - def test_migration_complete_task_state_invalid(self): + @ddt.data(constants.TASK_STATE_DATA_COPYING_STARTING, + constants.TASK_STATE_MIGRATION_SUCCESS, + constants.TASK_STATE_DATA_COPYING_IN_PROGRESS, + constants.TASK_STATE_MIGRATION_ERROR, + constants.TASK_STATE_MIGRATION_CANCELLED, + None) + def test_migration_complete_task_state_invalid(self, task_state): share = db_utils.create_share( id='fake_id', - task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) + task_state=task_state) self.assertRaises(exception.InvalidShare, self.api.migration_complete, self.context, share) @@ -2421,86 +2437,301 @@ class ShareAPITestCase(test.TestCase): self.api.migration_complete, self.context, share) - def test_migration_cancel(self): + @ddt.data(None, Exception('fake')) + def test_migration_cancel(self, exc): share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) + services = ['fake_service'] - 
self.mock_object(data_rpc.DataAPI, 'data_copy_cancel') + self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) + self.mock_object(db_api, 'service_get_all_by_topic', + mock.Mock(return_value=services)) + self.mock_object(data_rpc.DataAPI, 'data_copy_cancel', + mock.Mock(side_effect=[exc])) - self.api.migration_cancel(self.context, share) + if exc: + self.assertRaises( + exception.ShareMigrationError, self.api.migration_cancel, + self.context, share) + else: + self.api.migration_cancel(self.context, share) data_rpc.DataAPI.data_copy_cancel.assert_called_once_with( self.context, share['id']) + db_api.service_get_all_by_topic.assert_called_once_with( + self.context, 'manila-data') + + @ddt.unpack + def test_migration_cancel_service_down(self): + service = 'fake_service' + instance1 = db_utils.create_share_instance( + share_id='fake_id', status=constants.STATUS_MIGRATING) + instance2 = db_utils.create_share_instance( + share_id='fake_id', status=constants.STATUS_MIGRATING_TO) + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS, + instances=[instance1, instance2]) + + self.mock_object(utils, 'service_is_up', mock.Mock(return_value=False)) + self.mock_object(db_api, 'share_instance_get', + mock.Mock(return_value=instance1)) + self.mock_object(db_api, 'service_get_all_by_topic', + mock.Mock(return_value=service)) + + self.assertRaises(exception.InvalidShare, + self.api.migration_cancel, self.context, share) def test_migration_cancel_driver(self): + service = 'fake_service' + instance1 = db_utils.create_share_instance( + share_id='fake_id', + status=constants.STATUS_MIGRATING, + host='some_host') + instance2 = db_utils.create_share_instance( + share_id='fake_id', + status=constants.STATUS_MIGRATING_TO) share = db_utils.create_share( id='fake_id', - task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, + instances=[instance1, 
instance2]) - self.mock_object(share_rpc.ShareAPI, 'migration_cancel') + self.mock_object(db_api, 'share_instance_get', + mock.Mock(return_value=instance1)) + self.mock_object(self.api.share_rpcapi, 'migration_cancel') + self.mock_object(db_api, 'service_get_by_args', + mock.Mock(return_value=service)) + self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.api.migration_cancel(self.context, share) - share_rpc.ShareAPI.migration_cancel.assert_called_once_with( - self.context, share) + self.api.share_rpcapi.migration_cancel.assert_called_once_with( + self.context, instance1, instance2['id']) + db_api.service_get_by_args.assert_called_once_with( + self.context, instance1['host'], 'manila-share') - def test_migration_cancel_task_state_invalid(self): + @ddt.unpack + def test_migration_cancel_driver_service_down(self): + + service = 'fake_service' + instance1 = db_utils.create_share_instance( + share_id='fake_id', + status=constants.STATUS_MIGRATING, + host='some_host') + instance2 = db_utils.create_share_instance( + share_id='fake_id', + status=constants.STATUS_MIGRATING_TO) + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, + instances=[instance1, instance2]) + + self.mock_object(utils, 'service_is_up', mock.Mock(return_value=False)) + self.mock_object(db_api, 'share_instance_get', + mock.Mock(return_value=instance1)) + self.mock_object(db_api, 'service_get_by_args', + mock.Mock(return_value=service)) + + self.assertRaises(exception.InvalidShare, + self.api.migration_cancel, self.context, share) + + @ddt.data(constants.TASK_STATE_DATA_COPYING_STARTING, + constants.TASK_STATE_MIGRATION_SUCCESS, + constants.TASK_STATE_MIGRATION_ERROR, + constants.TASK_STATE_MIGRATION_CANCELLED, + None) + def test_migration_cancel_task_state_invalid(self, task_state): share = db_utils.create_share( id='fake_id', - task_state=constants.TASK_STATE_DATA_COPYING_STARTING) + task_state=task_state) 
self.assertRaises(exception.InvalidShare, self.api.migration_cancel, self.context, share) - def test_migration_get_progress(self): + @ddt.data({'total_progress': 0}, Exception('fake')) + def test_migration_get_progress(self, expected): share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) + services = ['fake_service'] - expected = 'fake_progress' - + self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) + self.mock_object(db_api, 'service_get_all_by_topic', + mock.Mock(return_value=services)) self.mock_object(data_rpc.DataAPI, 'data_copy_get_progress', - mock.Mock(return_value=expected)) + mock.Mock(side_effect=[expected])) - result = self.api.migration_get_progress(self.context, share) - - self.assertEqual(expected, result) + if not isinstance(expected, Exception): + result = self.api.migration_get_progress(self.context, share) + self.assertEqual(expected, result) + else: + self.assertRaises( + exception.ShareMigrationError, self.api.migration_get_progress, + self.context, share) data_rpc.DataAPI.data_copy_get_progress.assert_called_once_with( self.context, share['id']) + db_api.service_get_all_by_topic.assert_called_once_with( + self.context, 'manila-data') + + @ddt.unpack + def test_migration_get_progress_service_down(self): + instance1 = db_utils.create_share_instance( + share_id='fake_id', status=constants.STATUS_MIGRATING) + instance2 = db_utils.create_share_instance( + share_id='fake_id', status=constants.STATUS_MIGRATING_TO) + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS, + instances=[instance1, instance2]) + services = ['fake_service'] + + self.mock_object(utils, 'service_is_up', mock.Mock(return_value=False)) + self.mock_object(db_api, 'service_get_all_by_topic', + mock.Mock(return_value=services)) + self.mock_object(db_api, 'share_instance_get', + mock.Mock(return_value=instance1)) + + self.assertRaises(exception.InvalidShare, + 
self.api.migration_get_progress, self.context, share) def test_migration_get_progress_driver(self): + expected = {'total_progress': 0} + instance1 = db_utils.create_share_instance( + share_id='fake_id', + status=constants.STATUS_MIGRATING, + host='some_host') + instance2 = db_utils.create_share_instance( + share_id='fake_id', + status=constants.STATUS_MIGRATING_TO) share = db_utils.create_share( id='fake_id', - task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, + instances=[instance1, instance2]) + service = 'fake_service' - expected = 'fake_progress' - - self.mock_object(share_rpc.ShareAPI, 'migration_get_progress', + self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) + self.mock_object(db_api, 'service_get_by_args', + mock.Mock(return_value=service)) + self.mock_object(db_api, 'share_instance_get', + mock.Mock(return_value=instance1)) + self.mock_object(self.api.share_rpcapi, 'migration_get_progress', mock.Mock(return_value=expected)) result = self.api.migration_get_progress(self.context, share) self.assertEqual(expected, result) - share_rpc.ShareAPI.migration_get_progress.assert_called_once_with( - self.context, share) + self.api.share_rpcapi.migration_get_progress.assert_called_once_with( + self.context, instance1, instance2['id']) + db_api.service_get_by_args.assert_called_once_with( + self.context, instance1['host'], 'manila-share') - def test_migration_get_progress_task_state_invalid(self): + def test_migration_get_progress_driver_error(self): + instance1 = db_utils.create_share_instance( + share_id='fake_id', + status=constants.STATUS_MIGRATING, + host='some_host') + instance2 = db_utils.create_share_instance( + share_id='fake_id', + status=constants.STATUS_MIGRATING_TO) share = db_utils.create_share( id='fake_id', - task_state=constants.TASK_STATE_DATA_COPYING_STARTING) + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, + instances=[instance1, 
instance2]) + service = 'fake_service' + + self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) + self.mock_object(db_api, 'service_get_by_args', + mock.Mock(return_value=service)) + self.mock_object(db_api, 'share_instance_get', + mock.Mock(return_value=instance1)) + self.mock_object(self.api.share_rpcapi, 'migration_get_progress', + mock.Mock(side_effect=Exception('fake'))) + + self.assertRaises(exception.ShareMigrationError, + self.api.migration_get_progress, self.context, share) + + self.api.share_rpcapi.migration_get_progress.assert_called_once_with( + self.context, instance1, instance2['id']) + + def test_migration_get_progress_driver_service_down(self): + service = 'fake_service' + instance1 = db_utils.create_share_instance( + share_id='fake_id', + status=constants.STATUS_MIGRATING, + host='some_host') + instance2 = db_utils.create_share_instance( + share_id='fake_id', + status=constants.STATUS_MIGRATING_TO) + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, + instances=[instance1, instance2]) + + self.mock_object(utils, 'service_is_up', mock.Mock(return_value=False)) + self.mock_object(db_api, 'share_instance_get', + mock.Mock(return_value=instance1)) + self.mock_object(db_api, 'service_get_by_args', + mock.Mock(return_value=service)) self.assertRaises(exception.InvalidShare, self.api.migration_get_progress, self.context, share) + @ddt.data(constants.TASK_STATE_DATA_COPYING_STARTING, + constants.TASK_STATE_MIGRATION_SUCCESS, + constants.TASK_STATE_MIGRATION_ERROR, + constants.TASK_STATE_MIGRATION_CANCELLED, + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, + constants.TASK_STATE_DATA_COPYING_COMPLETED, + None) + def test_migration_get_progress_task_state_invalid(self, task_state): + + share = db_utils.create_share( + id='fake_id', + task_state=task_state) + + self.assertRaises(exception.InvalidShare, + self.api.migration_get_progress, self.context, share) + + @ddt.data(None, 
{'invalid_progress': None}, {}) + def test_migration_get_progress_invalid(self, progress): + + instance1 = db_utils.create_share_instance( + share_id='fake_id', + status=constants.STATUS_MIGRATING, + host='some_host') + instance2 = db_utils.create_share_instance( + share_id='fake_id', + status=constants.STATUS_MIGRATING_TO) + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, + instances=[instance1, instance2]) + service = 'fake_service' + + self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) + self.mock_object(db_api, 'service_get_by_args', + mock.Mock(return_value=service)) + self.mock_object(db_api, 'share_instance_get', + mock.Mock(return_value=instance1)) + + self.mock_object(self.api.share_rpcapi, 'migration_get_progress', + mock.Mock(return_value=progress)) + + self.assertRaises(exception.InvalidShare, + self.api.migration_get_progress, self.context, share) + + self.api.share_rpcapi.migration_get_progress.assert_called_once_with( + self.context, instance1, instance2['id']) + class OtherTenantsShareActionsTestCase(test.TestCase): def setUp(self): diff --git a/manila/tests/share/test_driver.py b/manila/tests/share/test_driver.py index 104887288e..56328163a7 100644 --- a/manila/tests/share/test_driver.py +++ b/manila/tests/share/test_driver.py @@ -466,16 +466,24 @@ class ShareDriverTestCase(test.TestCase): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) - self.assertEqual((None, None), - share_driver.migration_start(None, None, None, - None, None, None)) + self.assertRaises(NotImplementedError, share_driver.migration_start, + None, None, None, None, None) + + def test_migration_continue(self): + + driver.CONF.set_default('driver_handles_share_servers', False) + share_driver = driver.ShareDriver(False) + + self.assertRaises(NotImplementedError, share_driver.migration_continue, + None, None, None, None, None,) def 
test_migration_complete(self): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) - share_driver.migration_complete(None, None, None, None) + self.assertRaises(NotImplementedError, share_driver.migration_complete, + None, None, None, None, None) def test_migration_cancel(self): @@ -483,7 +491,7 @@ class ShareDriverTestCase(test.TestCase): share_driver = driver.ShareDriver(False) self.assertRaises(NotImplementedError, share_driver.migration_cancel, - None, None, None, None) + None, None, None, None, None) def test_migration_get_progress(self): @@ -492,15 +500,7 @@ class ShareDriverTestCase(test.TestCase): self.assertRaises(NotImplementedError, share_driver.migration_get_progress, - None, None, None, None) - - def test_migration_get_driver_info_default(self): - - driver.CONF.set_default('driver_handles_share_servers', False) - share_driver = driver.ShareDriver(False) - - self.assertIsNone( - share_driver.migration_get_driver_info(None, None, None), None) + None, None, None, None, None) @ddt.data(True, False) def test_migration_get_info(self, admin): @@ -521,6 +521,21 @@ class ShareDriverTestCase(test.TestCase): self.assertEqual(expected, migration_info) + def test_migration_check_compatibility(self): + + driver.CONF.set_default('driver_handles_share_servers', False) + share_driver = driver.ShareDriver(False) + share_driver.configuration = configuration.Configuration(None) + expected = { + 'compatible': False, + 'writable': False, + } + + result = share_driver.migration_check_compatibility( + None, None, None, None, None) + + self.assertEqual(expected, result) + def test_update_access(self): share_driver = driver.ShareDriver(True, configuration=None) self.assertRaises( diff --git a/manila/tests/share/test_manager.py b/manila/tests/share/test_manager.py index 03cd065598..3a1cf5919b 100644 --- a/manila/tests/share/test_manager.py +++ b/manila/tests/share/test_manager.py @@ -16,6 +16,7 @@ """Test of Share Manager for 
Manila.""" import datetime import random +import time import ddt import mock @@ -33,6 +34,7 @@ from manila.db.sqlalchemy import models from manila import exception from manila import quota from manila.share import access as share_access +from manila.share import api from manila.share import drivers_private_data from manila.share import manager from manila.share import migration as migration_api @@ -180,7 +182,6 @@ class ShareManagerTestCase(test.TestCase): assert_called_once_with() @ddt.data( - "migration_get_driver_info", "migration_get_info", "migration_cancel", "migration_get_progress", @@ -273,6 +274,32 @@ class ShareManagerTestCase(test.TestCase): return instances, rules + def test_init_host_with_migration_driver_in_progress(self): + share = db_utils.create_share( + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) + instance = db_utils.create_share_instance( + share_id=share['id'], + host=self.share_manager.host + '#fake_pool', + status=constants.STATUS_MIGRATING) + + self.mock_object(self.share_manager.db, + 'share_instances_get_all_by_host', mock.Mock( + return_value=[instance])) + self.mock_object(self.share_manager.db, 'share_get', + mock.Mock(return_value=share)) + self.mock_object(rpcapi.ShareAPI, 'migration_driver_recovery') + + self.share_manager.init_host() + + (self.share_manager.db.share_instances_get_all_by_host. 
+ assert_called_once_with(utils.IsAMatcher(context.RequestContext), + self.share_manager.host)) + self.share_manager.db.share_get.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), share['id']) + rpcapi.ShareAPI.migration_driver_recovery.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), share, + self.share_manager.host) + def test_init_host_with_shares_and_rules(self): # initialization of test data @@ -3609,171 +3636,105 @@ class ShareManagerTestCase(test.TestCase): self.share_manager.driver.migration_get_info.assert_called_once_with( self.context, share_instance, share_server) - def test_migration_get_driver_info(self): - share_instance = {'share_server_id': 'fake_server_id'} - share_instance_id = 'fake-id' - share_server = 'fake-share-server' - migration_info = 'fake_info' + @ddt.data(True, False) + def test_migration_start(self, success): - # mocks - self.mock_object(self.share_manager.db, 'share_instance_get', - mock.Mock(return_value=share_instance)) - self.mock_object(self.share_manager.db, 'share_server_get', - mock.Mock(return_value=share_server)) - self.mock_object(self.share_manager.driver, - 'migration_get_driver_info', - mock.Mock(return_value=migration_info)) - - result = self.share_manager.migration_get_driver_info( - self.context, share_instance_id) - - # asserts - self.assertEqual(migration_info, result) - - self.share_manager.db.share_instance_get.assert_called_once_with( - self.context, share_instance_id, with_share_data=True) - - self.share_manager.driver.migration_get_driver_info.\ - assert_called_once_with(self.context, share_instance, share_server) - - @ddt.data({'return_value': (True, 'fake_model_update'), 'notify': True}, - {'return_value': (False, 'fake_model_update'), 'notify': True}, - {'return_value': (True, 'fake_model_update'), 'notify': False}, - {'return_value': (False, 'fake_model_update'), 'notify': False}) - @ddt.unpack - def test_migration_start(self, return_value, notify): - - server = 
'fake_share_server' instance = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_AVAILABLE, - share_server_id='fake_server_id') + share_server_id='fake_server_id', + host='fake@backend#pool') share = db_utils.create_share(id='fake_id', instances=[instance]) - host = {'host': 'fake_host'} - driver_migration_info = 'driver_fake_info' + fake_service = {'availability_zone_id': 'fake_az_id'} + host = 'fake2@backend#pool' # mocks self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=instance)) - self.mock_object(self.share_manager.db, 'share_server_get', - mock.Mock(return_value=server)) self.mock_object(self.share_manager.db, 'share_update') self.mock_object(self.share_manager.db, 'share_instance_update') - self.mock_object(rpcapi.ShareAPI, 'migration_get_driver_info', - mock.Mock(return_value=driver_migration_info)) - self.mock_object(self.share_manager.driver, 'migration_start', - mock.Mock(return_value=return_value)) - if not return_value[0]: + self.mock_object(self.share_manager, '_migration_start_driver', + mock.Mock(return_value=success)) + self.mock_object(self.share_manager.db, 'service_get_by_args', + mock.Mock(return_value=fake_service)) + + if not success: self.mock_object(self.share_manager, '_migration_start_generic') # run self.share_manager.migration_start( - self.context, 'fake_id', host, False, notify) + self.context, 'fake_id', host, False, True) # asserts self.share_manager.db.share_get.assert_called_once_with( self.context, share['id']) self.share_manager.db.share_instance_get.assert_called_once_with( self.context, instance['id'], with_share_data=True) - self.share_manager.db.share_server_get.assert_called_once_with( - utils.IsAMatcher(context.RequestContext), - instance['share_server_id']) share_update_calls = [ mock.call( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}), - 
mock.call( - self.context, share['id'], - {'task_state': ( - constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)}) - ] - share_instance_update_calls = [ - mock.call(self.context, instance['id'], - {'status': constants.STATUS_MIGRATING}), - mock.call(self.context, instance['id'], 'fake_model_update') ] - if not notify and return_value[0]: - share_update_calls.append(mock.call( - self.context, share['id'], - {'task_state': - constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE})) - elif notify and return_value[0]: - share_update_calls.append(mock.call( - self.context, share['id'], - {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS})) - share_instance_update_calls.append(mock.call( - self.context, instance['id'], - {'status': constants.STATUS_AVAILABLE, - 'host': host['host']})) - elif not return_value[0]: + if not success: share_update_calls.append(mock.call( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS})) self.share_manager.db.share_update.assert_has_calls(share_update_calls) - self.share_manager.db.share_instance_update.assert_has_calls( - share_instance_update_calls) - rpcapi.ShareAPI.migration_get_driver_info.assert_called_once_with( - self.context, instance) - self.share_manager.driver.migration_start.assert_called_once_with( - self.context, instance, server, host, driver_migration_info, - notify) + self.share_manager.db.share_instance_update.assert_called_once_with( + self.context, instance['id'], + {'status': constants.STATUS_MIGRATING}) + self.share_manager._migration_start_driver.assert_called_once_with( + self.context, share, instance, host, True, 'fake_az_id') + if not success: + (self.share_manager._migration_start_generic. 
+ assert_called_once_with( + self.context, share, instance, host, True, 'fake_az_id')) + self.share_manager.db.service_get_by_args.assert_called_once_with( + self.context, 'fake2@backend', 'manila-share') def test_migration_start_exception(self): - server = 'fake_share_server' instance = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_AVAILABLE, - share_server_id='fake_server_id') + share_server_id='fake_server_id', + host='fake@backend#pool') share = db_utils.create_share(id='fake_id', instances=[instance]) - host = 'fake_host' - driver_migration_info = 'driver_fake_info' + host = 'fake2@backend#pool' + fake_service = {'availability_zone_id': 'fake_az_id'} # mocks self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=instance)) - self.mock_object(self.share_manager.db, 'share_server_get', - mock.Mock(return_value=server)) self.mock_object(self.share_manager.db, 'share_update') self.mock_object(self.share_manager.db, 'share_instance_update') - self.mock_object(rpcapi.ShareAPI, 'migration_get_driver_info', - mock.Mock(return_value=driver_migration_info)) - - self.mock_object(self.share_manager.driver, 'migration_start', - mock.Mock(side_effect=Exception('fake'))) + self.mock_object(self.share_manager, '_migration_start_driver', + mock.Mock(side_effect=Exception('fake_exc_1'))) self.mock_object(self.share_manager, '_migration_start_generic', - mock.Mock(side_effect=Exception('fake'))) - self.mock_object(manager.LOG, 'exception') + mock.Mock(side_effect=Exception('fake_exc_2'))) + self.mock_object(self.share_manager.db, 'service_get_by_args', + mock.Mock(return_value=fake_service)) # run - self.assertRaises(exception.ShareMigrationFailed, - self.share_manager.migration_start, - self.context, 'fake_id', host, False, False) + self.assertRaises( + exception.ShareMigrationFailed, + self.share_manager.migration_start, + 
self.context, 'fake_id', host, False, True) # asserts self.share_manager.db.share_get.assert_called_once_with( self.context, share['id']) self.share_manager.db.share_instance_get.assert_called_once_with( self.context, instance['id'], with_share_data=True) - self.share_manager.db.share_server_get.assert_called_once_with( - utils.IsAMatcher(context.RequestContext), - instance['share_server_id']) share_update_calls = [ - mock.call( - self.context, share['id'], - {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}), - mock.call( - self.context, share['id'], - {'task_state': ( - constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)}), mock.call( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}), @@ -3781,24 +3742,23 @@ class ShareManagerTestCase(test.TestCase): self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) ] + share_instance_update_calls = [ - mock.call(self.context, instance['id'], - {'status': constants.STATUS_MIGRATING}), - mock.call(self.context, instance['id'], - {'status': constants.STATUS_AVAILABLE}) + mock.call( + self.context, instance['id'], + {'status': constants.STATUS_MIGRATING}), + mock.call( + self.context, instance['id'], + {'status': constants.STATUS_AVAILABLE}) ] - (self.share_manager._migration_start_generic. 
- assert_called_once_with(self.context, share, instance, host, - False)) - self.assertTrue(manager.LOG.exception.called) self.share_manager.db.share_update.assert_has_calls(share_update_calls) self.share_manager.db.share_instance_update.assert_has_calls( share_instance_update_calls) - rpcapi.ShareAPI.migration_get_driver_info.assert_called_once_with( - self.context, instance) - self.share_manager.driver.migration_start.assert_called_once_with( - self.context, instance, server, host, driver_migration_info, False) + self.share_manager._migration_start_driver.assert_called_once_with( + self.context, share, instance, host, True, 'fake_az_id') + self.share_manager.db.service_get_by_args.assert_called_once_with( + self.context, 'fake2@backend', 'manila-share') @ddt.data(None, Exception('fake')) def test__migration_start_generic(self, exc): @@ -3844,7 +3804,7 @@ class ShareManagerTestCase(test.TestCase): self.assertRaises( exception.ShareMigrationFailed, self.share_manager._migration_start_generic, - self.context, share, instance, 'fake_host', False) + self.context, share, instance, 'fake_host', False, 'fake_az_id') # asserts self.share_manager.db.share_server_get.assert_called_once_with( @@ -3855,7 +3815,7 @@ class ShareManagerTestCase(test.TestCase): assert_called_once_with(instance, server, True, self.share_manager.driver) migration_api.ShareMigrationHelper.create_instance_and_wait.\ - assert_called_once_with(share, instance, 'fake_host') + assert_called_once_with(share, instance, 'fake_host', 'fake_az_id') migration_api.ShareMigrationHelper.\ cleanup_access_rules.assert_called_once_with( instance, server, self.share_manager.driver) @@ -3875,10 +3835,279 @@ class ShareManagerTestCase(test.TestCase): migration_api.ShareMigrationHelper.\ cleanup_new_instance.assert_called_once_with(new_instance) - @ddt.data('fake_model_update', Exception('fake')) - def test_migration_complete_driver(self, exc): - server = 'fake_server' - model_update = 'fake_model_update' + 
@ddt.data({'share_network_id': 'fake_share_network_id', 'exc': None}, + {'share_network_id': None, 'exc': Exception('fake')}) + @ddt.unpack + def test__migration_start_driver(self, exc, share_network_id): + fake_dest_host = 'fake_host' + src_server = db_utils.create_share_server() + if share_network_id: + dest_server = db_utils.create_share_server() + else: + dest_server = None + share = db_utils.create_share(share_network_id=share_network_id) + migrating_instance = db_utils.create_share_instance( + share_id='fake_id', + share_network_id=share_network_id) + src_instance = db_utils.create_share_instance( + share_id='fake_id', + share_server_id='fake_src_server_id', + share_network_id=share_network_id) + compatibility = {'compatible': True, 'writable': False} + + # mocks + self.mock_object(time, 'sleep') + self.mock_object(self.share_manager.db, 'share_instance_get', + mock.Mock(return_value=migrating_instance)) + self.mock_object(self.share_manager.db, 'share_server_get', + mock.Mock(return_value=src_server)) + self.mock_object(self.share_manager.driver, + 'migration_check_compatibility', + mock.Mock(return_value=compatibility)) + self.mock_object( + api.API, 'create_share_instance_and_get_request_spec', + mock.Mock(return_value=({}, migrating_instance))) + self.mock_object(self.share_manager.db, 'share_instance_update') + self.mock_object(self.share_manager.db, 'share_update') + self.mock_object(rpcapi.ShareAPI, 'provide_share_server', + mock.Mock(return_value='fake_dest_share_server_id')) + self.mock_object(rpcapi.ShareAPI, 'create_share_server') + self.mock_object( + migration_api.ShareMigrationHelper, 'wait_for_share_server', + mock.Mock(return_value=dest_server)) + self.mock_object( + migration_api.ShareMigrationHelper, 'change_to_read_only') + self.mock_object(self.share_manager.driver, 'migration_start') + self.mock_object( + self.share_manager.db, 'share_export_locations_update') + self.mock_object(self.share_manager, '_migration_driver_continue', + 
mock.Mock(side_effect=exc)) + self.mock_object(self.share_manager, '_migration_delete_instance') + + # run + if exc: + self.assertRaises( + exception.ShareMigrationFailed, + self.share_manager._migration_start_driver, + self.context, share, src_instance, fake_dest_host, True, + 'fake_az_id') + else: + result = self.share_manager._migration_start_driver( + self.context, share, src_instance, fake_dest_host, True, + 'fake_az_id') + + # asserts + if not exc: + self.assertTrue(result) + self.share_manager.db.share_instance_get.assert_called_once_with( + self.context, migrating_instance['id'], with_share_data=True) + self.share_manager.db.share_server_get.assert_called_once_with( + self.context, 'fake_src_server_id') + (api.API.create_share_instance_and_get_request_spec. + assert_called_once_with(self.context, share, 'fake_az_id', None, + 'fake_host', share_network_id)) + (self.share_manager.driver.migration_check_compatibility. + assert_called_once_with(self.context, src_instance, + migrating_instance, src_server, dest_server)) + if share_network_id: + (rpcapi.ShareAPI.provide_share_server. + assert_called_once_with( + self.context, migrating_instance, share_network_id)) + rpcapi.ShareAPI.create_share_server.assert_called_once_with( + self.context, migrating_instance, 'fake_dest_share_server_id') + (migration_api.ShareMigrationHelper.wait_for_share_server. + assert_called_once_with('fake_dest_share_server_id')) + self.share_manager.db.share_update.assert_called_once_with( + self.context, share['id'], + {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS}) + self.share_manager.driver.migration_start.assert_called_once_with( + self.context, src_instance, migrating_instance, src_server, + dest_server) + if exc: + (self.share_manager._migration_delete_instance. 
+ assert_called_once_with(self.context, migrating_instance['id'])) + self.share_manager.db.share_instance_update.assert_called_once_with( + self.context, migrating_instance['id'], + {'status': constants.STATUS_MIGRATING_TO}) + self.share_manager._migration_driver_continue.assert_called_once_with( + self.context, share, src_instance, migrating_instance, + src_server, dest_server, True) + self.assertTrue(time.sleep.called) + + def test__migration_start_driver_not_compatible(self): + + share = db_utils.create_share() + src_instance = db_utils.create_share_instance( + share_id='fake_id', + share_server_id='src_server_id', + share_network_id='fake_share_network_id') + fake_dest_host = 'fake_host' + src_server = db_utils.create_share_server() + dest_server = db_utils.create_share_server() + migrating_instance = db_utils.create_share_instance( + share_id='fake_id', + share_network_id='fake_share_network_id') + + # mocks + self.mock_object(self.share_manager.db, 'share_server_get', + mock.Mock(return_value=src_server)) + self.mock_object(self.share_manager.db, 'share_instance_get', + mock.Mock(return_value=migrating_instance)) + self.mock_object( + api.API, 'create_share_instance_and_get_request_spec', + mock.Mock(return_value=({}, migrating_instance))) + self.mock_object(rpcapi.ShareAPI, 'provide_share_server', + mock.Mock(return_value='fake_dest_share_server_id')) + self.mock_object(rpcapi.ShareAPI, 'create_share_server') + self.mock_object( + migration_api.ShareMigrationHelper, 'wait_for_share_server', + mock.Mock(return_value=dest_server)) + self.mock_object(self.share_manager, '_migration_delete_instance') + + # run + self.assertRaises( + exception.ShareMigrationFailed, + self.share_manager._migration_start_driver, + self.context, share, src_instance, fake_dest_host, True, + 'fake_az_id') + + # asserts + self.share_manager.db.share_server_get.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), 'src_server_id') + 
self.share_manager.db.share_instance_get.assert_called_once_with( + self.context, migrating_instance['id'], with_share_data=True) + (rpcapi.ShareAPI.provide_share_server. + assert_called_once_with( + self.context, migrating_instance, 'fake_share_network_id')) + rpcapi.ShareAPI.create_share_server.assert_called_once_with( + self.context, migrating_instance, 'fake_dest_share_server_id') + (migration_api.ShareMigrationHelper.wait_for_share_server. + assert_called_once_with('fake_dest_share_server_id')) + (api.API.create_share_instance_and_get_request_spec. + assert_called_once_with(self.context, share, 'fake_az_id', None, + 'fake_host', 'fake_share_network_id')) + self.share_manager._migration_delete_instance.assert_called_once_with( + self.context, migrating_instance['id']) + + @ddt.data({'finished': True, 'notify': True, 'cancelled': False}, + {'finished': True, 'notify': False, 'cancelled': False}, + {'finished': False, 'notify': True, 'cancelled': False}, + {'finished': False, 'notify': False, 'cancelled': True}) + @ddt.unpack + def test__migration_driver_continue(self, finished, notify, cancelled): + + share = db_utils.create_share( + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) + if cancelled: + aborted_share = db_utils.create_share( + task_state=constants.TASK_STATE_MIGRATION_CANCELLED) + else: + aborted_share = db_utils.create_share( + task_state=constants.TASK_STATE_MIGRATION_ERROR) + + self.mock_object(self.share_manager.driver, 'migration_continue', + mock.Mock(side_effect=[False, finished])) + if not finished: + self.mock_object(self.share_manager.db, 'share_get', mock.Mock( + side_effect=[share, share, aborted_share])) + else: + self.mock_object(self.share_manager.db, 'share_get', mock.Mock( + return_value=share)) + self.mock_object(self.share_manager.db, 'share_update') + self.mock_object(self.share_manager, '_migration_complete_driver') + self.mock_object(time, 'sleep') + + if not finished and not cancelled: + self.assertRaises( + 
exception.ShareMigrationFailed, + self.share_manager._migration_driver_continue, + self.context, share, 'src_ins', 'dest_ins', + 'src_server', 'dest_server', notify) + else: + self.share_manager._migration_driver_continue( + self.context, share, 'src_ins', 'dest_ins', + 'src_server', 'dest_server', notify) + + self.share_manager.db.share_get.assert_called_with( + self.context, share['id']) + self.share_manager.driver.migration_continue.assert_called_with( + self.context, 'src_ins', 'dest_ins', 'src_server', 'dest_server') + if finished: + self.share_manager.db.share_update.assert_called_once_with( + self.context, share['id'], + {'task_state': + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE}) + if notify: + (self.share_manager._migration_complete_driver. + assert_called_once_with( + self.context, share, 'src_ins', 'dest_ins')) + + def test_migration_driver_recovery(self): + + share = db_utils.create_share() + src_server = db_utils.create_share_server() + dest_server = db_utils.create_share_server() + src_instance = db_utils.create_share_instance( + share_id=share['id'], share_server_id=src_server['id']) + dest_instance = db_utils.create_share_instance( + share_id=share['id'], + host='fake_host', + share_server_id=dest_server['id']) + + self.mock_object(self.share_manager.db, 'share_get', + mock.Mock(return_value=share)) + self.mock_object(api.API, 'get_migrating_instances', + mock.Mock(return_value=( + src_instance['id'], dest_instance['id']))) + self.mock_object(self.share_manager.db, 'share_instance_get', + mock.Mock(side_effect=[src_instance, dest_instance])) + self.mock_object(self.share_manager.db, 'share_server_get', + mock.Mock(side_effect=[src_server, dest_server])) + self.mock_object(self.share_manager, '_migration_driver_continue', + mock.Mock(side_effect=Exception('fake'))) + self.mock_object(self.share_manager.db, 'share_instance_update') + self.mock_object(self.share_manager.db, 'share_update') + self.mock_object(self.share_manager, 
'_migration_delete_instance') + + self.assertRaises( + exception.ShareMigrationFailed, + self.share_manager.migration_driver_recovery, + self.context, share['id']) + + self.share_manager.db.share_get.assert_called_once_with( + self.context, share['id']) + api.API.get_migrating_instances.assert_called_once_with(share) + self.share_manager.db.share_instance_get.assert_has_calls([ + mock.call(self.context, src_instance['id'], with_share_data=True), + mock.call(self.context, dest_instance['id'], with_share_data=True), + ]) + self.share_manager.db.share_server_get.assert_has_calls([ + mock.call(self.context, src_server['id']), + mock.call(self.context, dest_server['id']), + ]) + self.share_manager._migration_driver_continue.assert_called_once_with( + self.context, share, src_instance, dest_instance, + src_server, dest_server) + self.share_manager.db.share_instance_update.assert_called_once_with( + self.context, src_instance['id'], + {'status': constants.STATUS_AVAILABLE}) + self.share_manager.db.share_update.assert_called_once_with( + self.context, share['id'], + {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) + self.share_manager._migration_delete_instance.assert_called_once_with( + self.context, dest_instance['id']) + + @ddt.data({'task_state': constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, + 'exc': None}, + {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, + 'exc': Exception('fake')}, + {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETED, + 'exc': None}, + {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETED, + 'exc': Exception('fake')}) + @ddt.unpack + def test_migration_complete(self, task_state, exc): + instance = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_AVAILABLE, @@ -3886,100 +4115,66 @@ class ShareManagerTestCase(test.TestCase): share = db_utils.create_share( id='fake_id', instances=[instance], - task_state=constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE) + task_state=task_state) # 
mocks self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager.db, 'share_instance_get', - mock.Mock(return_value=instance)) - self.mock_object(self.share_manager.db, 'share_server_get', - mock.Mock(return_value=server)) - self.mock_object(self.share_manager.db, 'share_update') - if isinstance(exc, Exception): - self.mock_object(self.share_manager.driver, 'migration_complete', - mock.Mock(side_effect=exc)) - else: - self.mock_object(self.share_manager.driver, 'migration_complete', - mock.Mock(return_value=exc)) - self.mock_object(self.share_manager.db, 'share_instance_update') - self.mock_object(rpcapi.ShareAPI, 'migration_get_driver_info', - mock.Mock(return_value='fake_info')) - self.mock_object(manager.LOG, 'exception') + mock.Mock(side_effect=[instance, instance])) - # run - if isinstance(exc, Exception): + if task_state == constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE: + self.mock_object( + self.share_manager, '_migration_complete_driver', + mock.Mock(side_effect=exc)) + else: + self.mock_object( + self.share_manager, '_migration_complete_generic', + mock.Mock(side_effect=exc)) + + if exc: + self.mock_object(manager.LOG, 'exception') + self.mock_object(self.share_manager.db, 'share_update') + self.mock_object(self.share_manager.db, 'share_instance_update') self.assertRaises( exception.ShareMigrationFailed, self.share_manager.migration_complete, - self.context, 'fake_id', 'fake_ins_id', 'new_fake_ins_id') + self.context, 'fake_ins_id', 'new_fake_ins_id') else: self.share_manager.migration_complete( - self.context, 'fake_id', 'fake_ins_id', 'new_fake_ins_id') + self.context, 'fake_ins_id', 'new_fake_ins_id') # asserts self.share_manager.db.share_get.assert_called_once_with( self.context, share['id']) - self.share_manager.db.share_instance_get.assert_called_once_with( - self.context, instance['id'], with_share_data=True) - self.share_manager.db.share_server_get.assert_called_once_with( - 
utils.IsAMatcher(context.RequestContext), 'fake_server_id') - self.share_manager.driver.migration_complete.assert_called_once_with( - self.context, instance, server, 'fake_info') - rpcapi.ShareAPI.migration_get_driver_info.assert_called_once_with( - self.context, instance) - if isinstance(exc, Exception): + self.share_manager.db.share_instance_get.assert_has_calls([ + mock.call(self.context, 'fake_ins_id', with_share_data=True), + mock.call(self.context, 'new_fake_ins_id', with_share_data=True)]) + + if task_state == constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE: + (self.share_manager._migration_complete_driver. + assert_called_once_with(self.context, share, instance, instance)) + else: + (self.share_manager._migration_complete_generic. + assert_called_once_with( + self.context, share, 'fake_ins_id', 'new_fake_ins_id')) + + if exc: + self.assertTrue(manager.LOG.exception.called) self.share_manager.db.share_update.assert_called_once_with( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) - self.assertTrue(manager.LOG.exception.called) - else: - self.share_manager.db.share_update.assert_called_once_with( - self.context, share['id'], - {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}) - self.share_manager.db.share_instance_update.\ - assert_called_once_with(self.context, instance['id'], - model_update) - - @ddt.data(None, Exception('fake')) - def test_migration_complete_generic(self, exc): - share = db_utils.create_share( - id='fake_id', - task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED) - - # mocks - self.mock_object(self.share_manager.db, 'share_get', - mock.Mock(return_value=share)) - self.mock_object(self.share_manager, '_migration_complete', - mock.Mock(side_effect=exc)) - self.mock_object(self.share_manager.db, 'share_update') - self.mock_object(self.share_manager.db, 'share_instance_update') - self.mock_object(manager.LOG, 'exception') - - # run - if exc: - self.assertRaises( - exception.ShareMigrationFailed, - 
self.share_manager.migration_complete, - self.context, 'fake_id', 'fake_ins_id', 'new_fake_ins_id') - else: - self.share_manager.migration_complete( - self.context, 'fake_id', 'fake_ins_id', 'new_fake_ins_id') - - # asserts - self.share_manager.db.share_get.assert_called_once_with( - self.context, share['id']) - self.share_manager._migration_complete.assert_called_once_with( - self.context, share, 'fake_ins_id', 'new_fake_ins_id') - if exc: - self.share_manager.db.share_update.assert_called_once_with( - self.context, share['id'], - {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) - self.share_manager.db.share_instance_update.\ - assert_called_once_with( - self.context, 'fake_ins_id', - {'status': constants.STATUS_AVAILABLE}) - self.assertTrue(manager.LOG.exception.called) + share_instance_update_calls = [ + mock.call(self.context, 'fake_ins_id', + {'status': constants.STATUS_AVAILABLE}) + ] + if task_state == constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE: + share_instance_update_calls.append( + mock.call(self.context, 'new_fake_ins_id', + {'status': constants.STATUS_ERROR}) + ) + self.share_manager.db.share_instance_update.assert_has_calls( + share_instance_update_calls) @ddt.data(constants.TASK_STATE_DATA_COPYING_ERROR, constants.TASK_STATE_DATA_COPYING_CANCELLED, @@ -4013,13 +4208,13 @@ class ShareManagerTestCase(test.TestCase): # run if status == constants.TASK_STATE_DATA_COPYING_CANCELLED: - self.share_manager._migration_complete( + self.share_manager._migration_complete_generic( self.context, share, instance['id'], new_instance['id']) else: self.assertRaises( exception.ShareMigrationFailed, - self.share_manager._migration_complete, self.context, share, - instance['id'], new_instance['id']) + self.share_manager._migration_complete_generic, self.context, + share, instance['id'], new_instance['id']) # asserts self.share_manager.db.share_instance_get.assert_has_calls([ @@ -4047,7 +4242,67 @@ class ShareManagerTestCase(test.TestCase): 
assert_called_once_with(new_instance) self.assertTrue(manager.LOG.exception.called) - def test__migration_complete(self): + def test__migration_complete_driver(self): + fake_src_host = 'src_host' + fake_dest_host = 'dest_host' + fake_rules = 'fake_rules' + fake_export_locations = 'fake_export_locations' + src_server = db_utils.create_share_server() + dest_server = db_utils.create_share_server() + share = db_utils.create_share() + dest_instance = db_utils.create_share_instance( + share_id=share['id'], + share_server_id='fake_dest_server_id', + host=fake_dest_host) + src_instance = db_utils.create_share_instance( + share_id=share['id'], + share_server_id='fake_src_server_id', + host=fake_src_host) + + # mocks + self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock( + side_effect=[src_server, dest_server])) + self.mock_object( + self.share_manager.db, 'share_access_get_all_for_instance', + mock.Mock(return_value=fake_rules)) + self.mock_object( + self.share_manager.db, 'share_export_locations_update') + self.mock_object(self.share_manager.driver, 'migration_complete', + mock.Mock(return_value=fake_export_locations)) + self.mock_object( + self.share_manager.access_helper, '_check_needs_refresh', + mock.Mock(return_value=True)) + self.mock_object(self.share_manager.db, 'share_instance_update') + self.mock_object(self.share_manager.db, 'share_update') + self.mock_object(self.share_manager, '_migration_delete_instance') + self.mock_object(migration_api.ShareMigrationHelper, + 'apply_new_access_rules') + + # run + self.share_manager._migration_complete_driver( + self.context, share, src_instance, dest_instance) + + # asserts + self.share_manager.db.share_server_get.assert_has_calls([ + mock.call(self.context, 'fake_src_server_id'), + mock.call(self.context, 'fake_dest_server_id')]) + (self.share_manager.db.share_export_locations_update. 
+ assert_called_once_with(self.context, dest_instance['id'], + fake_export_locations)) + self.share_manager.driver.migration_complete.assert_called_once_with( + self.context, src_instance, dest_instance, src_server, dest_server) + (migration_api.ShareMigrationHelper.apply_new_access_rules. + assert_called_once_with(dest_instance)) + self.share_manager._migration_delete_instance.assert_called_once_with( + self.context, src_instance['id']) + self.share_manager.db.share_instance_update.assert_called_once_with( + self.context, dest_instance['id'], + {'status': constants.STATUS_AVAILABLE}) + self.share_manager.db.share_update.assert_called_once_with( + self.context, dest_instance['share_id'], + {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}) + + def test__migration_complete_generic(self): instance = db_utils.create_share_instance( share_id='fake_id', @@ -4071,7 +4326,7 @@ class ShareManagerTestCase(test.TestCase): 'apply_new_access_rules') # run - self.share_manager._migration_complete( + self.share_manager._migration_complete_generic( self.context, share, instance['id'], new_instance['id']) # asserts @@ -4101,81 +4356,246 @@ class ShareManagerTestCase(test.TestCase): migration_api.ShareMigrationHelper.delete_instance_and_wait.\ assert_called_once_with(instance) - def test_migration_cancel(self): + @ddt.data(constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, + constants.TASK_STATE_DATA_COPYING_COMPLETED) + def test_migration_cancel(self, task_state): - server = db_utils.create_share_server() - share = db_utils.create_share( - task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, - share_server_id=server['id']) + dest_host = 'fake_host' + server_1 = db_utils.create_share_server() + server_2 = db_utils.create_share_server() + share = db_utils.create_share(task_state=task_state) + + instance_1 = db_utils.create_share_instance( + share_id=share['id'], share_server_id=server_1['id']) + instance_2 = 
db_utils.create_share_instance( + share_id=share['id'], share_server_id=server_2['id'], + host=dest_host) self.mock_object(db, 'share_get', mock.Mock(return_value=share)) - + self.mock_object(db, 'share_instance_get', + mock.Mock(side_effect=[instance_1, instance_2])) + self.mock_object(db, 'share_update') + self.mock_object(db, 'share_instance_update') + self.mock_object(self.share_manager, '_migration_delete_instance') self.mock_object(db, 'share_server_get', - mock.Mock(return_value=server)) - - self.mock_object(rpcapi.ShareAPI, 'migration_get_driver_info', - mock.Mock(return_value='migration_info')) - + mock.Mock(side_effect=[server_1, server_2])) self.mock_object(self.share_manager.driver, 'migration_cancel') + self.mock_object(migration_api.ShareMigrationHelper, + 'cleanup_new_instance') + self.mock_object(migration_api.ShareMigrationHelper, + 'cleanup_access_rules') - self.share_manager.migration_cancel(self.context, share) + self.share_manager.migration_cancel( + self.context, instance_1['id'], instance_2['id']) - rpcapi.ShareAPI.migration_get_driver_info.assert_called_once_with( - self.context, share.instance) + share_instance_update_calls = [] - self.share_manager.driver.migration_cancel.assert_called_once_with( - self.context, share.instance, server, 'migration_info') + if task_state == constants.TASK_STATE_DATA_COPYING_COMPLETED: + share_instance_update_calls.append(mock.call( + self.context, instance_2['id'], + {'status': constants.STATUS_INACTIVE})) + (migration_api.ShareMigrationHelper.cleanup_new_instance. + assert_called_once_with(instance_2)) + (migration_api.ShareMigrationHelper.cleanup_access_rules. + assert_called_once_with(instance_1, server_1, + self.share_manager.driver)) + + else: + self.share_manager.driver.migration_cancel.assert_called_once_with( + self.context, instance_1, instance_2, server_1, server_2) + + (self.share_manager._migration_delete_instance. 
+ assert_called_once_with(self.context, instance_2['id'])) + + self.share_manager.db.share_get.assert_called_once_with( + self.context, share['id']) + self.share_manager.db.share_server_get.assert_has_calls([ + mock.call(self.context, server_1['id']), + mock.call(self.context, server_2['id']), + ]) + self.share_manager.db.share_instance_get.assert_has_calls([ + mock.call(self.context, instance_1['id'], with_share_data=True), + mock.call(self.context, instance_2['id'], with_share_data=True) + ]) + self.share_manager.db.share_update.assert_called_once_with( + self.context, share['id'], + {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED}) + + share_instance_update_calls.append(mock.call( + self.context, instance_1['id'], + {'status': constants.STATUS_AVAILABLE})) + + self.share_manager.db.share_instance_update.assert_has_calls( + share_instance_update_calls) + + def test__migration_delete_instance(self): + + instance = db_utils.create_share_instance(share_id='fake_id') + mapping_list = [{'id': 'mapping_id_1'}, {'id': 'mapping_id_2'}] + rules = [{'id': 'rule_id_1'}, {'id': 'rule_id_2'}] + + # mocks + self.mock_object(self.share_manager.db, 'share_instance_get', + mock.Mock(return_value=instance)) + self.mock_object(self.share_manager.db, 'share_instance_update') + self.mock_object( + self.share_manager.db, 'share_access_get_all_for_instance', + mock.Mock(return_value=rules)) + self.mock_object( + self.share_manager.db, 'share_instance_access_get', mock.Mock( + side_effect=[mapping_list[0], mapping_list[1]])) + self.mock_object(self.share_manager.db, 'share_instance_delete') + self.mock_object(self.share_manager.db, 'share_instance_access_delete') + self.mock_object(self.share_manager, '_check_delete_share_server') + + # run + self.share_manager._migration_delete_instance( + self.context, instance['id']) + + # asserts + self.share_manager.db.share_instance_get.assert_called_once_with( + self.context, instance['id'], with_share_data=True) + 
self.share_manager.db.share_instance_update.assert_called_once_with( + self.context, instance['id'], + {'status': constants.STATUS_INACTIVE}) + (self.share_manager.db.share_access_get_all_for_instance. + assert_called_once_with(self.context, instance['id'])) + self.share_manager.db.share_instance_access_get.assert_has_calls([ + mock.call(self.context, 'rule_id_1', instance['id']), + mock.call(self.context, 'rule_id_2', instance['id']) + ]) + self.share_manager.db.share_instance_access_delete.assert_has_calls([ + mock.call(self.context, 'mapping_id_1'), + mock.call(self.context, 'mapping_id_2') + ]) + self.share_manager.db.share_instance_delete.assert_called_once_with( + self.context, instance['id']) + self.share_manager._check_delete_share_server.assert_called_once_with( + self.context, instance) def test_migration_cancel_invalid(self): share = db_utils.create_share() + self.mock_object(db, 'share_instance_get', + mock.Mock(return_value=share.instance)) self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.assertRaises( exception.InvalidShare, self.share_manager.migration_cancel, - self.context, share) + self.context, 'ins1_id', 'ins2_id') def test_migration_get_progress(self): - server = db_utils.create_share_server() + expected = 'fake_progress' + dest_host = 'fake_host' + server_1 = db_utils.create_share_server() + server_2 = db_utils.create_share_server() share = db_utils.create_share( task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, - share_server_id=server['id']) + share_server_id=server_1['id']) - expected = 'fake_progress' + instance_1 = db_utils.create_share_instance( + share_id=share['id'], share_server_id=server_1['id']) + instance_2 = db_utils.create_share_instance( + share_id=share['id'], share_server_id=server_2['id'], + host=dest_host) self.mock_object(db, 'share_get', mock.Mock(return_value=share)) + self.mock_object(db, 'share_instance_get', + mock.Mock(side_effect=[instance_1, instance_2])) self.mock_object(db, 
'share_server_get', - mock.Mock(return_value=server)) - - self.mock_object(rpcapi.ShareAPI, 'migration_get_driver_info', - mock.Mock(return_value='migration_info')) + mock.Mock(side_effect=[server_1, server_2])) self.mock_object(self.share_manager.driver, 'migration_get_progress', mock.Mock(return_value=expected)) - result = self.share_manager.migration_get_progress(self.context, share) + result = self.share_manager.migration_get_progress( + self.context, instance_1['id'], instance_2['id']) self.assertEqual(expected, result) - rpcapi.ShareAPI.migration_get_driver_info.assert_called_once_with( - self.context, share.instance) - - self.share_manager.driver.migration_get_progress.\ + (self.share_manager.driver.migration_get_progress. assert_called_once_with( - self.context, share.instance, server, 'migration_info') + self.context, instance_1, instance_2, server_1, server_2)) + + self.share_manager.db.share_get.assert_called_once_with( + self.context, share['id']) + self.share_manager.db.share_server_get.assert_has_calls([ + mock.call(self.context, server_1['id']), + mock.call(self.context, server_2['id']), + ]) + self.share_manager.db.share_instance_get.assert_has_calls([ + mock.call(self.context, instance_1['id'], with_share_data=True), + mock.call(self.context, instance_2['id'], with_share_data=True) + ]) def test_migration_get_progress_invalid(self): share = db_utils.create_share() + self.mock_object(db, 'share_instance_get', + mock.Mock(return_value=share.instance)) self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.assertRaises( exception.InvalidShare, self.share_manager.migration_get_progress, - self.context, share) + self.context, 'ins1_id', 'ins2_id') + + def test_provide_share_server(self): + + instance = db_utils.create_share_instance(share_id='fake_id', + consistency_group_id='cg_id') + snapshot = db_utils.create_snapshot(with_share=True) + cg = db_utils.create_consistency_group() + server = db_utils.create_share_server() + + # mocks + 
self.mock_object(self.share_manager.db, 'share_instance_get', + mock.Mock(return_value=instance)) + self.mock_object(self.share_manager.db, 'share_snapshot_get', + mock.Mock(return_value=snapshot)) + self.mock_object(self.share_manager.db, 'consistency_group_get', + mock.Mock(return_value=cg)) + self.mock_object(self.share_manager, '_provide_share_server_for_share', + mock.Mock(return_value=(server, instance))) + + # run + result = self.share_manager.provide_share_server( + self.context, 'ins_id', 'net_id', 'snap_id') + + # asserts + self.assertEqual(server['id'], result) + self.share_manager.db.share_instance_get.assert_called_once_with( + self.context, 'ins_id', with_share_data=True) + self.share_manager.db.share_snapshot_get.assert_called_once_with( + self.context, 'snap_id') + self.share_manager.db.consistency_group_get.assert_called_once_with( + self.context, 'cg_id') + (self.share_manager._provide_share_server_for_share. + assert_called_once_with(self.context, 'net_id', instance, snapshot, + cg, create_on_backend=False)) + + def test_create_share_server(self): + + server = db_utils.create_share_server() + + # mocks + self.mock_object(self.share_manager.db, 'share_server_get', + mock.Mock(return_value=server)) + self.mock_object(self.share_manager, '_create_share_server_in_backend') + + # run + self.share_manager.create_share_server( + self.context, 'server_id') + + # asserts + self.share_manager.db.share_server_get.assert_called_once_with( + self.context, 'server_id') + (self.share_manager._create_share_server_in_backend. 
+ assert_called_once_with(self.context, server)) def test_manage_snapshot_invalid_driver_mode(self): self.mock_object(self.share_manager, 'driver') diff --git a/manila/tests/share/test_migration.py b/manila/tests/share/test_migration.py index de17dfb73c..cf617b7b39 100644 --- a/manila/tests/share/test_migration.py +++ b/manila/tests/share/test_migration.py @@ -113,7 +113,7 @@ class ShareMigrationHelperTestCase(test.TestCase): def test_create_instance_and_wait(self): - host = {'host': 'fake_host'} + host = 'fake_host' share_instance_creating = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_CREATING, @@ -131,13 +131,13 @@ class ShareMigrationHelperTestCase(test.TestCase): self.mock_object(time, 'sleep') # run - self.helper.create_instance_and_wait(self.share, - share_instance_creating, host) + self.helper.create_instance_and_wait( + self.share, share_instance_creating, host, 'fake_az_id') # asserts share_api.API.create_instance.assert_called_once_with( self.context, self.share, self.share_instance['share_network_id'], - 'fake_host') + 'fake_host', 'fake_az_id') db.share_instance_get.assert_has_calls([ mock.call(self.context, share_instance_creating['id'], @@ -149,7 +149,7 @@ class ShareMigrationHelperTestCase(test.TestCase): def test_create_instance_and_wait_status_error(self): - host = {'host': 'fake_host'} + host = 'fake_host' share_instance_error = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_ERROR, @@ -165,12 +165,12 @@ class ShareMigrationHelperTestCase(test.TestCase): # run self.assertRaises(exception.ShareMigrationFailed, self.helper.create_instance_and_wait, - self.share, self.share_instance, host) + self.share, self.share_instance, host, 'fake_az_id') # asserts share_api.API.create_instance.assert_called_once_with( self.context, self.share, self.share_instance['share_network_id'], - 'fake_host') + 'fake_host', 'fake_az_id') db.share_instance_get.assert_called_once_with( self.context, 
share_instance_error['id'], with_share_data=True) @@ -180,7 +180,7 @@ class ShareMigrationHelperTestCase(test.TestCase): def test_create_instance_and_wait_timeout(self): - host = {'host': 'fake_host'} + host = 'fake_host' share_instance_creating = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_CREATING, @@ -204,12 +204,12 @@ class ShareMigrationHelperTestCase(test.TestCase): # run self.assertRaises(exception.ShareMigrationFailed, self.helper.create_instance_and_wait, - self.share, self.share_instance, host) + self.share, self.share_instance, host, 'fake_az_id') # asserts share_api.API.create_instance.assert_called_once_with( self.context, self.share, self.share_instance['share_network_id'], - 'fake_host') + 'fake_host', 'fake_az_id') db.share_instance_get.assert_called_once_with( self.context, share_instance_creating['id'], with_share_data=True) @@ -219,6 +219,33 @@ class ShareMigrationHelperTestCase(test.TestCase): self.helper.cleanup_new_instance.assert_called_once_with( share_instance_creating) + @ddt.data(constants.STATUS_ACTIVE, constants.STATUS_ERROR, + constants.STATUS_CREATING) + def test_wait_for_share_server(self, status): + + server = db_utils.create_share_server(status=status) + + # mocks + self.mock_object(db, 'share_server_get', + mock.Mock(return_value=server)) + + # run + if status == constants.STATUS_ACTIVE: + result = self.helper.wait_for_share_server('fake_server_id') + self.assertEqual(server, result) + elif status == constants.STATUS_ERROR: + self.assertRaises( + exception.ShareServerNotCreated, + self.helper.wait_for_share_server, 'fake_server_id') + else: + self.mock_object(time, 'sleep') + self.assertRaises( + exception.ShareServerNotReady, + self.helper.wait_for_share_server, 'fake_server_id') + + # asserts + db.share_server_get.assert_called_with(self.context, 'fake_server_id') + def test_change_to_read_only_with_ro_support(self): share_instance = db_utils.create_share_instance( diff --git 
a/manila/tests/share/test_rpcapi.py b/manila/tests/share/test_rpcapi.py index ea0eab665b..c280a10ed2 100644 --- a/manila/tests/share/test_rpcapi.py +++ b/manila/tests/share/test_rpcapi.py @@ -49,7 +49,7 @@ class ShareRpcAPITestCase(test.TestCase): share_server = db_utils.create_share_server() cg = {'id': 'fake_cg_id', 'host': 'fake_host'} cgsnapshot = {'id': 'fake_cg_id'} - host = {'host': 'fake_host', 'capabilities': 1} + host = 'fake_host' self.fake_share = jsonutils.to_primitive(share) # mock out the getattr on the share db model object since jsonutils # doesn't know about those extra attributes to pull in @@ -101,7 +101,7 @@ class ShareRpcAPITestCase(test.TestCase): expected_msg['snapshot_id'] = snapshot['id'] if 'dest_host' in expected_msg: del expected_msg['dest_host'] - expected_msg['host'] = self.fake_host + expected_msg['dest_host'] = self.fake_host if 'share_replica' in expected_msg: share_replica = expected_msg.pop('share_replica', None) expected_msg['share_replica_id'] = share_replica['id'] @@ -110,6 +110,9 @@ class ShareRpcAPITestCase(test.TestCase): snapshot = expected_msg.pop('replicated_snapshot', None) expected_msg['snapshot_id'] = snapshot['id'] expected_msg['share_id'] = snapshot['share_id'] + if 'src_share_instance' in expected_msg: + share_instance = expected_msg.pop('src_share_instance', None) + expected_msg['src_instance_id'] = share_instance['id'] if 'host' in kwargs: host = kwargs['host'] @@ -123,8 +126,10 @@ class ShareRpcAPITestCase(test.TestCase): host = kwargs['share_replica']['host'] elif 'replicated_snapshot' in kwargs: host = kwargs['share']['instance']['host'] - else: + elif 'share' in kwargs: host = kwargs['share']['host'] + else: + host = self.fake_host target['server'] = host target['topic'] = '%s.%s' % (CONF.share_topic, host) @@ -247,46 +252,48 @@ class ShareRpcAPITestCase(test.TestCase): host='fake_host1') def test_migration_start(self): - fake_dest_host = self.Desthost() self._test_share_api('migration_start', 
rpc_method='cast', version='1.6', share=self.fake_share, - dest_host=fake_dest_host, + dest_host='fake_host', force_host_copy=True, notify=True) + def test_migration_driver_recovery(self): + fake_dest_host = "host@backend" + self._test_share_api('migration_driver_recovery', + rpc_method='cast', + version='1.12', + share=self.fake_share, + host=fake_dest_host) + def test_migration_get_info(self): self._test_share_api('migration_get_info', rpc_method='call', version='1.6', share_instance=self.fake_share) - def test_migration_get_driver_info(self): - self._test_share_api('migration_get_driver_info', - rpc_method='call', - version='1.6', - share_instance=self.fake_share) - def test_migration_complete(self): self._test_share_api('migration_complete', rpc_method='cast', - version='1.10', - share=self.fake_share, - share_instance_id='fake_ins_id', - new_share_instance_id='new_fake_ins_id') + version='1.12', + src_share_instance=self.fake_share['instance'], + dest_instance_id='new_fake_ins_id') def test_migration_cancel(self): self._test_share_api('migration_cancel', - rpc_method='call', - version='1.10', - share=self.fake_share) + rpc_method='cast', + version='1.12', + src_share_instance=self.fake_share['instance'], + dest_instance_id='ins2_id') def test_migration_get_progress(self): self._test_share_api('migration_get_progress', rpc_method='call', - version='1.10', - share=self.fake_share) + version='1.12', + src_share_instance=self.fake_share['instance'], + dest_instance_id='ins2_id') def test_delete_share_replica(self): self._test_share_api('delete_share_replica', @@ -338,6 +345,17 @@ class ShareRpcAPITestCase(test.TestCase): force=False, host='fake_host') - class Desthost(object): - host = 'fake_host' - capabilities = 1 + def test_provide_share_server(self): + self._test_share_api('provide_share_server', + rpc_method='call', + version='1.12', + share_instance=self.fake_share['instance'], + share_network_id='fake_network_id', + snapshot_id='fake_snapshot_id') + + def 
test_create_share_server(self): + self._test_share_api('create_share_server', + rpc_method='cast', + version='1.12', + share_instance=self.fake_share['instance'], + share_server_id='fake_server_id') diff --git a/manila_tempest_tests/common/constants.py b/manila_tempest_tests/common/constants.py index bef35a53f0..abef181497 100644 --- a/manila_tempest_tests/common/constants.py +++ b/manila_tempest_tests/common/constants.py @@ -34,3 +34,19 @@ REPLICATION_STATE_OUT_OF_SYNC = 'out_of_sync' RULE_STATE_ACTIVE = 'active' RULE_STATE_OUT_OF_SYNC = 'out_of_sync' RULE_STATE_ERROR = 'error' + +TASK_STATE_MIGRATION_STARTING = 'migration_starting' +TASK_STATE_MIGRATION_IN_PROGRESS = 'migration_in_progress' +TASK_STATE_MIGRATION_COMPLETING = 'migration_completing' +TASK_STATE_MIGRATION_SUCCESS = 'migration_success' +TASK_STATE_MIGRATION_ERROR = 'migration_error' +TASK_STATE_MIGRATION_CANCELLED = 'migration_cancelled' +TASK_STATE_MIGRATION_DRIVER_STARTING = 'migration_driver_starting' +TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS = 'migration_driver_in_progress' +TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE = 'migration_driver_phase1_done' +TASK_STATE_DATA_COPYING_STARTING = 'data_copying_starting' +TASK_STATE_DATA_COPYING_IN_PROGRESS = 'data_copying_in_progress' +TASK_STATE_DATA_COPYING_COMPLETING = 'data_copying_completing' +TASK_STATE_DATA_COPYING_COMPLETED = 'data_copying_completed' +TASK_STATE_DATA_COPYING_CANCELLED = 'data_copying_cancelled' +TASK_STATE_DATA_COPYING_ERROR = 'data_copying_error' diff --git a/manila_tempest_tests/services/share/v2/json/shares_client.py b/manila_tempest_tests/services/share/v2/json/shares_client.py index 46c0ce7cff..2887904373 100755 --- a/manila_tempest_tests/services/share/v2/json/shares_client.py +++ b/manila_tempest_tests/services/share/v2/json/shares_client.py @@ -14,6 +14,7 @@ # under the License. 
import json +import six import time from six.moves.urllib import parse as urlparse @@ -688,8 +689,11 @@ class SharesV2Client(shares_client.SharesClient): ############### - def list_share_types(self, params=None, version=LATEST_MICROVERSION): + def list_share_types(self, params=None, default=False, + version=LATEST_MICROVERSION): uri = 'types' + if default: + uri += '/default' if params is not None: uri += '?%s' % urlparse.urlencode(params) resp, body = self.get(uri, version=version) @@ -1076,22 +1080,25 @@ class SharesV2Client(shares_client.SharesClient): headers=EXPERIMENTAL, extra_headers=True, version=version) - def wait_for_migration_status(self, share_id, dest_host, status, + def wait_for_migration_status(self, share_id, dest_host, status_to_wait, version=LATEST_MICROVERSION): """Waits for a share to migrate to a certain host.""" + statuses = ((status_to_wait,) + if not isinstance(status_to_wait, (tuple, list, set)) + else status_to_wait) share = self.get_share(share_id, version=version) migration_timeout = CONF.share.migration_timeout start = int(time.time()) - while share['task_state'] != status: + while share['task_state'] not in statuses: time.sleep(self.build_interval) share = self.get_share(share_id, version=version) - if share['task_state'] == status: - return share + if share['task_state'] in statuses: + break elif share['task_state'] == 'migration_error': raise share_exceptions.ShareMigrationException( share_id=share['id'], src=share['host'], dest=dest_host) elif int(time.time()) - start >= migration_timeout: - message = ('Share %(share_id)s failed to reach status ' + message = ('Share %(share_id)s failed to reach a status in ' '%(status)s when migrating from host %(src)s to ' 'host %(dest)s within the required time ' '%(timeout)s.' 
% { @@ -1099,9 +1106,10 @@ class SharesV2Client(shares_client.SharesClient): 'dest': dest_host, 'share_id': share['id'], 'timeout': self.build_timeout, - 'status': status, + 'status': six.text_type(statuses), }) raise exceptions.TimeoutException(message) + return share ################ diff --git a/manila_tempest_tests/tests/api/admin/test_migration.py b/manila_tempest_tests/tests/api/admin/test_migration.py index a5ceaa4845..c5fef446b9 100644 --- a/manila_tempest_tests/tests/api/admin/test_migration.py +++ b/manila_tempest_tests/tests/api/admin/test_migration.py @@ -16,6 +16,7 @@ from tempest import config from tempest import test +from manila_tempest_tests.common import constants from manila_tempest_tests.tests.api import base from manila_tempest_tests import utils @@ -25,7 +26,7 @@ CONF = config.CONF class MigrationNFSTest(base.BaseSharesAdminTest): """Tests Share Migration. - Tests migration in multi-backend environment. + Tests share migration in multi-backend environment. """ protocol = "nfs" @@ -37,7 +38,36 @@ class MigrationNFSTest(base.BaseSharesAdminTest): message = "%s tests are disabled" % cls.protocol raise cls.skipException(message) if not CONF.share.run_migration_tests: - raise cls.skipException("Migration tests disabled. 
Skipping.") + raise cls.skipException("Share migration tests are disabled.") + + @test.attr(type=[base.TAG_POSITIVE, base.TAG_BACKEND]) + @base.skip_if_microversion_lt("2.15") + def test_migration_cancel(self): + + share, dest_pool = self._setup_migration() + + old_exports = self.shares_v2_client.list_share_export_locations( + share['id'], version='2.15') + self.assertNotEmpty(old_exports) + old_exports = [x['path'] for x in old_exports + if x['is_admin_only'] is False] + self.assertNotEmpty(old_exports) + + task_states = (constants.TASK_STATE_DATA_COPYING_COMPLETED, + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE) + + share = self.migrate_share( + share['id'], dest_pool, version='2.15', notify=False, + wait_for_status=task_states) + + self._validate_migration_successful( + dest_pool, share, task_states, '2.15', notify=False) + + share = self.migration_cancel(share['id'], dest_pool) + + self._validate_migration_successful( + dest_pool, share, constants.TASK_STATE_MIGRATION_CANCELLED, + '2.15', notify=False) @test.attr(type=[base.TAG_POSITIVE, base.TAG_BACKEND]) @base.skip_if_microversion_lt("2.5") @@ -45,12 +75,11 @@ class MigrationNFSTest(base.BaseSharesAdminTest): share, dest_pool = self._setup_migration() - old_exports = share['export_locations'] - share = self.migrate_share(share['id'], dest_pool, version='2.5') - self._validate_migration_successful(dest_pool, share, old_exports, - version='2.5') + self._validate_migration_successful( + dest_pool, share, constants.TASK_STATE_MIGRATION_SUCCESS, + version='2.5') @test.attr(type=[base.TAG_POSITIVE, base.TAG_BACKEND]) @base.skip_if_microversion_lt("2.15") @@ -65,26 +94,29 @@ class MigrationNFSTest(base.BaseSharesAdminTest): if x['is_admin_only'] is False] self.assertNotEmpty(old_exports) + task_states = (constants.TASK_STATE_DATA_COPYING_COMPLETED, + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE) + share = self.migrate_share( share['id'], dest_pool, version='2.15', notify=False, - 
wait_for_status='data_copying_completed') + wait_for_status=task_states) - self._validate_migration_successful(dest_pool, share, - old_exports, '2.15', notify=False) + self._validate_migration_successful( + dest_pool, share, task_states, '2.15', notify=False) share = self.migration_complete(share['id'], dest_pool, version='2.15') - self._validate_migration_successful(dest_pool, share, old_exports, - version='2.15') + self._validate_migration_successful( + dest_pool, share, constants.TASK_STATE_MIGRATION_SUCCESS, + version='2.15') def _setup_migration(self): - pools = self.shares_client.list_pools()['pools'] + pools = self.shares_v2_client.list_pools(detail=True)['pools'] if len(pools) < 2: - raise self.skipException("At least two different pool entries " - "are needed to run migration tests. " - "Skipping.") + raise self.skipException("At least two different pool entries are " + "needed to run share migration tests.") share = self.create_share(self.protocol) share = self.shares_client.get_share(share['id']) @@ -101,8 +133,10 @@ class MigrationNFSTest(base.BaseSharesAdminTest): self.shares_v2_client.wait_for_share_status( share['id'], 'active', status_attr='access_rules_status') - dest_pool = next((x for x in pools if x['name'] != share['host']), - None) + default_type = self.shares_v2_client.list_share_types( + default=True)['share_type'] + + dest_pool = utils.choose_matching_backend(share, pools, default_type) self.assertIsNotNone(dest_pool) self.assertIsNotNone(dest_pool.get('name')) @@ -112,7 +146,12 @@ class MigrationNFSTest(base.BaseSharesAdminTest): return share, dest_pool def _validate_migration_successful(self, dest_pool, share, - old_exports, version, notify=True): + status_to_wait, version, notify=True): + + statuses = ((status_to_wait,) + if not isinstance(status_to_wait, (tuple, list, set)) + else status_to_wait) + if utils.is_microversion_lt(version, '2.9'): new_exports = share['export_locations'] self.assertNotEmpty(new_exports) @@ -127,12 +166,7 @@ 
class MigrationNFSTest(base.BaseSharesAdminTest): # Share migrated if notify: self.assertEqual(dest_pool, share['host']) - for export in old_exports: - self.assertFalse(export in new_exports) - self.assertEqual('migration_success', share['task_state']) # Share not migrated yet else: self.assertNotEqual(dest_pool, share['host']) - for export in old_exports: - self.assertTrue(export in new_exports) - self.assertEqual('data_copying_completed', share['task_state']) + self.assertIn(share['task_state'], statuses) diff --git a/manila_tempest_tests/tests/api/admin/test_migration_negative.py b/manila_tempest_tests/tests/api/admin/test_migration_negative.py index 908abecbd8..3dfc31af5b 100644 --- a/manila_tempest_tests/tests/api/admin/test_migration_negative.py +++ b/manila_tempest_tests/tests/api/admin/test_migration_negative.py @@ -18,7 +18,10 @@ from tempest.lib import exceptions as lib_exc from tempest import test import testtools +from manila_tempest_tests.common import constants +from manila_tempest_tests import share_exceptions from manila_tempest_tests.tests.api import base +from manila_tempest_tests import utils CONF = config.CONF @@ -26,7 +29,7 @@ CONF = config.CONF class MigrationNFSTest(base.BaseSharesAdminTest): """Tests Share Migration. - Tests migration in multi-backend environment. + Tests share migration in multi-backend environment. """ protocol = "nfs" @@ -35,18 +38,28 @@ class MigrationNFSTest(base.BaseSharesAdminTest): def resource_setup(cls): super(MigrationNFSTest, cls).resource_setup() if not CONF.share.run_migration_tests: - raise cls.skipException("Migration tests disabled. 
Skipping.") + raise cls.skipException("Share migration tests are disabled.") - cls.share = cls.create_share(cls.protocol) - cls.share = cls.shares_client.get_share(cls.share['id']) - pools = cls.shares_client.list_pools()['pools'] + pools = cls.shares_client.list_pools(detail=True)['pools'] if len(pools) < 2: raise cls.skipException("At least two different pool entries " - "are needed to run migration tests. " - "Skipping.") - cls.dest_pool = next((x for x in pools - if x['name'] != cls.share['host']), None) + "are needed to run share migration tests.") + + cls.share = cls.create_share(cls.protocol) + cls.share = cls.shares_client.get_share(cls.share['id']) + + default_type = cls.shares_v2_client.list_share_types( + default=True)['share_type'] + + dest_pool = utils.choose_matching_backend( + cls.share, pools, default_type) + + if not dest_pool or dest_pool.get('name') is None: + raise share_exceptions.ShareMigrationException( + "No valid pool entries to run share migration tests.") + + cls.dest_pool = dest_pool['name'] @test.attr(type=[base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND]) @base.skip_if_microversion_lt("2.15") @@ -91,10 +104,14 @@ class MigrationNFSTest(base.BaseSharesAdminTest): @test.attr(type=[base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND]) @base.skip_if_microversion_lt("2.5") def test_migrate_share_not_available_v2_5(self): - self.shares_client.reset_state(self.share['id'], 'error') - self.shares_client.wait_for_share_status(self.share['id'], 'error') + self.shares_client.reset_state( + self.share['id'], constants.STATUS_ERROR) + self.shares_client.wait_for_share_status(self.share['id'], + constants.STATUS_ERROR) self.assertRaises( lib_exc.BadRequest, self.shares_v2_client.migrate_share, self.share['id'], self.dest_pool, True, version='2.5') - self.shares_client.reset_state(self.share['id'], 'available') - self.shares_client.wait_for_share_status(self.share['id'], 'available') + self.shares_client.reset_state(self.share['id'], + 
constants.STATUS_AVAILABLE) + self.shares_client.wait_for_share_status(self.share['id'], + constants.STATUS_AVAILABLE) diff --git a/manila_tempest_tests/tests/api/base.py b/manila_tempest_tests/tests/api/base.py index 73fcf09e7c..82135bf641 100644 --- a/manila_tempest_tests/tests/api/base.py +++ b/manila_tempest_tests/tests/api/base.py @@ -419,6 +419,14 @@ class BaseSharesTest(test.BaseTestCase): version=kwargs.get('version')) return share + @classmethod + def migration_cancel(cls, share_id, dest_host, client=None, **kwargs): + client = client or cls.shares_v2_client + client.migration_cancel(share_id, **kwargs) + share = client.wait_for_migration_status( + share_id, dest_host, 'migration_cancelled', **kwargs) + return share + @classmethod def create_share(cls, *args, **kwargs): """Create one share and wait for available state. Retry if allowed.""" diff --git a/manila_tempest_tests/tests/scenario/test_share_basic_ops.py b/manila_tempest_tests/tests/scenario/test_share_basic_ops.py index dbe5599edc..b52fe07e5c 100644 --- a/manila_tempest_tests/tests/scenario/test_share_basic_ops.py +++ b/manila_tempest_tests/tests/scenario/test_share_basic_ops.py @@ -20,7 +20,9 @@ from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import test_utils from tempest.lib import exceptions from tempest import test +import testtools +from manila_tempest_tests.common import constants from manila_tempest_tests.tests.api import base from manila_tempest_tests.tests.scenario import manager_share as manager from manila_tempest_tests import utils @@ -244,29 +246,30 @@ class ShareBasicOpsBase(manager.ShareScenarioTest): @test.services('compute', 'network') @test.attr(type=[base.TAG_POSITIVE, base.TAG_BACKEND]) + @testtools.skipUnless(CONF.share.run_migration_tests, + "Share migration tests are disabled.") def test_migration_files(self): if self.protocol == "CIFS": raise self.skipException("Test for CIFS protocol not supported " - "at this moment. 
Skipping.") + "at this moment.") - if not CONF.share.run_migration_tests: - raise self.skipException("Migration tests disabled. Skipping.") - - pools = self.shares_admin_client.list_pools()['pools'] + pools = self.shares_admin_v2_client.list_pools(detail=True)['pools'] if len(pools) < 2: - raise self.skipException("At least two different pool entries " - "are needed to run migration tests. " - "Skipping.") + raise self.skipException("At least two different pool entries are " + "needed to run share migration tests.") instance = self.boot_instance(wait_until="BUILD") self.create_share() instance = self.wait_for_active_instance(instance["id"]) - share = self.shares_client.get_share(self.share['id']) + self.share = self.shares_client.get_share(self.share['id']) - dest_pool = next((x for x in pools if x['name'] != share['host']), - None) + default_type = self.shares_v2_client.list_share_types( + default=True)['share_type'] + + dest_pool = utils.choose_matching_backend( + self.share, pools, default_type) self.assertIsNotNone(dest_pool) self.assertIsNotNone(dest_pool.get('name')) @@ -307,7 +310,7 @@ class ShareBasicOpsBase(manager.ShareScenarioTest): self.umount_share(ssh_client) - share = self.migrate_share(share['id'], dest_pool) + self.share = self.migrate_share(self.share['id'], dest_pool) if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"): new_locations = self.share['export_locations'] else: @@ -315,11 +318,12 @@ class ShareBasicOpsBase(manager.ShareScenarioTest): self.share['id']) new_locations = [x['path'] for x in new_exports] - self.assertEqual(dest_pool, share['host']) + self.assertEqual(dest_pool, self.share['host']) locations.sort() new_locations.sort() self.assertNotEqual(locations, new_locations) - self.assertEqual('migration_success', share['task_state']) + self.assertEqual(constants.TASK_STATE_MIGRATION_SUCCESS, + self.share['task_state']) self.mount_share(new_locations[0], ssh_client) diff --git a/manila_tempest_tests/utils.py 
b/manila_tempest_tests/utils.py index dea51ab515..277130eefc 100644 --- a/manila_tempest_tests/utils.py +++ b/manila_tempest_tests/utils.py @@ -100,3 +100,18 @@ def rand_ip(): TEST_NET_3 = '203.0.113.' final_octet = six.text_type(random.randint(0, 255)) return TEST_NET_3 + final_octet + + +def choose_matching_backend(share, pools, share_type): + extra_specs = {} + # fix extra specs with string values instead of boolean + for k, v in share_type['extra_specs'].items(): + extra_specs[k] = (True if six.text_type(v).lower() == 'true' + else False if six.text_type(v).lower() == 'false' + else v) + selected_pool = next( + (x for x in pools if (x['name'] != share['host'] and all( + y in x['capabilities'].items() for y in extra_specs.items()))), + None) + + return selected_pool