From 39efc2bde81c2a0c747a491d3a778b822ca263b8 Mon Sep 17 00:00:00 2001
From: Douglas Viroel
Date: Fri, 19 Jun 2020 14:34:36 +0000
Subject: [PATCH] Add share server migration

This patch adds support for migration of share servers. This migration
is performed using a two-phase approach. Administrators are now able to
request the migration of a share server within and across backends,
with the possibility of choosing a different share network for the
destination share server.

- A new field called `task_state` was added to the share server model
  in order to help the administrator track the share server migration
  steps. A new field called `source_share_server_id` was added to link
  destination and source share servers.
- A new periodic task was added to track the migration of share servers
  and their resources.
- Two new states were added: `server_migrating` and
  `server_migrating_to`, to represent that a share server migration is
  in progress.
- When performing the server migration, manila will not go to the
  scheduler; instead, it will provide a request spec to drivers during
  the migration check driver call. It'll be up to the driver to
  validate whether there is free space to handle the share server.
- A new API called `share-server-migration-check` was added to check
  the feasibility of a migration, before actually triggering the start
  operation.

APIImpact
DocImpact
Partially Implements: bp share-server-migration

Co-Authored-By: Andre Beltrami
Co-Authored-By: Carlos Eduardo
Co-Authored-By: Felipe Rodrigues
Change-Id: Ic0751027d2c3f1ef7ab0f7836baff3070a230cfd
Signed-off-by: Douglas Viroel
---
 contrib/ci/pre_test_hook.sh | 2 +
 devstack/plugin.sh | 4 +
 manila/api/openstack/api_version_request.py | 9 +-
 .../openstack/rest_api_version_history.rst | 5 +
 manila/api/openstack/wsgi.py | 1 +
 manila/api/v2/share_servers.py | 194 ++-
 manila/api/views/share_server_migration.py | 78 +
 manila/api/views/share_servers.py | 10 +-
 manila/common/constants.py | 36 +-
 manila/db/api.py | 45 +-
 ...add_task_state_field_for_share_servers_.py | 64 +
 manila/db/sqlalchemy/api.py | 198 ++-
 manila/db/sqlalchemy/models.py | 19 +-
 manila/exception.py | 10 +-
 manila/policies/share_server.py | 62 +
 manila/share/api.py | 437 +++++-
 manila/share/driver.py | 295 ++++
 manila/share/manager.py | 917 +++++++++++-
 manila/share/migration.py | 56 +-
 manila/share/rpcapi.py | 70 +-
 manila/tests/api/v1/test_share_manage.py | 3 +-
 manila/tests/api/v1/test_share_servers.py | 9 +
 manila/tests/api/v2/test_share_servers.py | 610 ++++++++
 .../alembic/migrations_data_checks.py | 26 +
 manila/tests/db/sqlalchemy/test_api.py | 194 +++
 manila/tests/fake_share.py | 1 +
 manila/tests/share/test_api.py | 1100 ++++++++++++++
 manila/tests/share/test_driver.py | 58 +
 manila/tests/share/test_manager.py | 1266 ++++++++++++++++-
 manila/tests/share/test_migration.py | 25 +-
 manila/tests/share/test_rpcapi.py | 75 +-
 ...are-server-migration-51deb30212859277.yaml | 26 +
 32 files changed, 5723 insertions(+), 182 deletions(-)
 create mode 100644 manila/api/views/share_server_migration.py
 create mode 100644 manila/db/migrations/alembic/versions/5aa813ae673d_add_task_state_field_for_share_servers_.py
 create mode 100644 releasenotes/notes/add-share-server-migration-51deb30212859277.yaml

diff --git a/contrib/ci/pre_test_hook.sh b/contrib/ci/pre_test_hook.sh
index 633d367325..e127b87f9d 100755
--- a/contrib/ci/pre_test_hook.sh
+++ b/contrib/ci/pre_test_hook.sh
@@ -81,6 +81,8 @@ echo "MANILA_DATA_COPY_CHECK_HASH=${MANILA_DATA_COPY_CHECK_HASH:=True}" >> $localconf
 # Share Migration CI tests 
migration_continue period task interval echo "MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL=${MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL:=1}" >> $localconf +# Share Server Migration CI tests migration_continue period task interval +echo "MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL=${MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL:=10}" >> $localconf MANILA_SERVICE_IMAGE_ENABLED=${MANILA_SERVICE_IMAGE_ENABLED:-False} DEFAULT_EXTRA_SPECS=${DEFAULT_EXTRA_SPECS:-"'snapshot_support=True create_share_from_snapshot_support=True'"} diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 81fa736da1..7d3d983b2d 100755 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -237,6 +237,10 @@ function configure_manila { iniset $MANILA_CONF DEFAULT migration_driver_continue_update_interval $MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL fi + if ! [[ -z $MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL ]]; then + iniset $MANILA_CONF DEFAULT server_migration_driver_continue_update_interval $MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL + fi + if ! [[ -z $MANILA_DATA_COPY_CHECK_HASH ]]; then iniset $MANILA_CONF DEFAULT check_hash $MANILA_DATA_COPY_CHECK_HASH fi diff --git a/manila/api/openstack/api_version_request.py b/manila/api/openstack/api_version_request.py index 7c692634e4..d4541b6591 100644 --- a/manila/api/openstack/api_version_request.py +++ b/manila/api/openstack/api_version_request.py @@ -149,13 +149,20 @@ REST_API_VERSION_HISTORY = """ operation as a percentage. * 2.55 - Share groups feature is no longer considered experimental. * 2.56 - Share replication feature is no longer considered experimental. + * 2.57 - Added Share server migration operations: + 'share_server_migration_check' + 'share_server_migration_cancel' + 'share_server_migration_complete' + 'share_server_migration_start' + 'share_server_migration_get_progress' + 'share_server_reset_task_state' """ # The minimum and maximum versions of the API supported # The default api version request is defined to be the # minimum version of the API supported. _MIN_API_VERSION = "2.0" -_MAX_API_VERSION = "2.56" +_MAX_API_VERSION = "2.57" DEFAULT_API_VERSION = _MIN_API_VERSION diff --git a/manila/api/openstack/rest_api_version_history.rst b/manila/api/openstack/rest_api_version_history.rst index 934012e424..3b8beecd6f 100644 --- a/manila/api/openstack/rest_api_version_history.rst +++ b/manila/api/openstack/rest_api_version_history.rst @@ -309,3 +309,8 @@ user documentation. 2.56 ---- Share replication feature is no longer considered experimental. + +2.57 +---- + Added share server migration feature. A two-phase approach that migrates + a share server and all its resources to a new host. 
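+
+  For example, a migration could be started with a request body like the
+  following (an illustrative sketch; the action and field names come from
+  this microversion, while the host value is made up)::
+
+      POST /share-servers/{share_server_id}/action
+
+      {
+          "migration_start": {
+              "host": "hostB@backend2",
+              "writable": false,
+              "nondisruptive": false,
+              "preserve_snapshots": true,
+              "new_share_network_id": null
+          }
+      }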
diff --git a/manila/api/openstack/wsgi.py b/manila/api/openstack/wsgi.py index 249631bf7a..c23ea194b8 100644 --- a/manila/api/openstack/wsgi.py +++ b/manila/api/openstack/wsgi.py @@ -1204,6 +1204,7 @@ class AdminActionsMixin(object): constants.STATUS_ERROR_DELETING, constants.STATUS_MIGRATING, constants.STATUS_MIGRATING_TO, + constants.STATUS_SERVER_MIGRATING, ]), 'replica_state': set([ constants.REPLICA_STATE_ACTIVE, diff --git a/manila/api/v2/share_servers.py b/manila/api/v2/share_servers.py index 0773f7327c..5c4dba7d5b 100644 --- a/manila/api/v2/share_servers.py +++ b/manila/api/v2/share_servers.py @@ -20,6 +20,7 @@ from webob import exc from manila.api.openstack import wsgi from manila.api.v1 import share_servers +from manila.api.views import share_server_migration as server_migration_views from manila.common import constants from manila.db import api as db_api from manila import exception @@ -35,17 +36,13 @@ class ShareServerController(share_servers.ShareServerController, wsgi.AdminActionsMixin): """The Share Server API V2 controller for the OpenStack API.""" + def __init__(self): + super(ShareServerController, self).__init__() + self._migration_view_builder = server_migration_views.ViewBuilder() + valid_statuses = { - 'status': { - constants.STATUS_ACTIVE, - constants.STATUS_ERROR, - constants.STATUS_DELETING, - constants.STATUS_CREATING, - constants.STATUS_MANAGING, - constants.STATUS_UNMANAGING, - constants.STATUS_UNMANAGE_ERROR, - constants.STATUS_MANAGE_ERROR, - } + 'status': set(constants.SHARE_SERVER_STATUSES), + 'task_state': set(constants.SERVER_TASK_STATE_STATUSES), } def _update(self, context, id, update): @@ -204,6 +201,183 @@ class ShareServerController(share_servers.ShareServerController, return identifier, host, share_network, driver_opts, network_subnet + @wsgi.Controller.api_version('2.57', experimental=True) + @wsgi.action("migration_start") + @wsgi.Controller.authorize + @wsgi.response(http_client.ACCEPTED) + def share_server_migration_start(self, req, id, body): + """Migrate a share server to the specified host.""" + context = req.environ['manila.context'] + try: + share_server = db_api.share_server_get( + context, id) + except exception.ShareServerNotFound as e: + raise exc.HTTPNotFound(explanation=e.msg) + + params = body.get('migration_start') + + if not params: + raise exc.HTTPBadRequest(explanation=_("Request is missing body.")) + + bool_params = ['writable', 'nondisruptive', 'preserve_snapshots'] + mandatory_params = bool_params + ['host'] + + utils.check_params_exist(mandatory_params, params) + bool_param_values = utils.check_params_are_boolean(bool_params, params) + + pool_was_specified = len(params['host'].split('#')) > 1 + + if pool_was_specified: + msg = _('The destination host can not contain pool information.') + raise exc.HTTPBadRequest(explanation=msg) + + new_share_network = None + + new_share_network_id = params.get('new_share_network_id', None) + if new_share_network_id: + try: + new_share_network = db_api.share_network_get( + context, new_share_network_id) + except exception.NotFound: + msg = _("Share network %s not " + "found.") % new_share_network_id + raise exc.HTTPBadRequest(explanation=msg) + + try: + self.share_api.share_server_migration_start( + context, share_server, params['host'], + bool_param_values['writable'], + bool_param_values['nondisruptive'], + bool_param_values['preserve_snapshots'], + new_share_network=new_share_network) + except exception.ServiceIsDown as e: + # NOTE(dviroel): user should check if the host is healthy + raise 
exc.HTTPBadRequest(explanation=e.msg)
+        except exception.InvalidShareServer as e:
+            # NOTE(dviroel): an invalid share server means that some internal
+            # resource has an invalid state.
+            raise exc.HTTPConflict(explanation=e.msg)
+
+    @wsgi.Controller.api_version('2.57', experimental=True)
+    @wsgi.action("migration_complete")
+    @wsgi.Controller.authorize
+    def share_server_migration_complete(self, req, id, body):
+        """Invokes the 2nd phase of share server migration."""
+        context = req.environ['manila.context']
+        try:
+            share_server = db_api.share_server_get(
+                context, id)
+        except exception.ShareServerNotFound as e:
+            raise exc.HTTPNotFound(explanation=e.msg)
+
+        try:
+            result = self.share_api.share_server_migration_complete(
+                context, share_server)
+        except (exception.InvalidShareServer,
+                exception.ServiceIsDown) as e:
+            raise exc.HTTPBadRequest(explanation=e.msg)
+
+        return self._migration_view_builder.migration_complete(req, result)
+
+    @wsgi.Controller.api_version('2.57', experimental=True)
+    @wsgi.action("migration_cancel")
+    @wsgi.Controller.authorize
+    @wsgi.response(http_client.ACCEPTED)
+    def share_server_migration_cancel(self, req, id, body):
+        """Attempts to cancel share server migration."""
+        context = req.environ['manila.context']
+        try:
+            share_server = db_api.share_server_get(
+                context, id)
+        except exception.ShareServerNotFound as e:
+            raise exc.HTTPNotFound(explanation=e.msg)
+
+        try:
+            self.share_api.share_server_migration_cancel(context, share_server)
+        except (exception.InvalidShareServer,
+                exception.ServiceIsDown) as e:
+            raise exc.HTTPBadRequest(explanation=e.msg)
+
+    @wsgi.Controller.api_version('2.57', experimental=True)
+    @wsgi.action("migration_get_progress")
+    @wsgi.Controller.authorize
+    def share_server_migration_get_progress(self, req, id, body):
+        """Retrieve migration progress for a given share server."""
+        context = req.environ['manila.context']
+        try:
+            result = self.share_api.share_server_migration_get_progress(
+                context, id)
+        except exception.ServiceIsDown as e:
+            raise exc.HTTPConflict(explanation=e.msg)
+        except exception.InvalidShareServer as e:
+            raise exc.HTTPBadRequest(explanation=e.msg)
+
+        return self._migration_view_builder.get_progress(req, result)
+
+    @wsgi.Controller.api_version('2.57', experimental=True)
+    @wsgi.action("reset_task_state")
+    @wsgi.Controller.authorize
+    def share_server_reset_task_state(self, req, id, body):
+        return self._reset_status(req, id, body, status_attr='task_state')
+
+    @wsgi.Controller.api_version('2.57', experimental=True)
+    @wsgi.action("migration_check")
+    @wsgi.Controller.authorize
+    def share_server_migration_check(self, req, id, body):
+        """Check whether a share server can migrate to the specified host."""
+        context = req.environ['manila.context']
+        try:
+            share_server = db_api.share_server_get(
+                context, id)
+        except exception.ShareServerNotFound as e:
+            raise exc.HTTPNotFound(explanation=e.msg)
+
+        params = body.get('migration_check')
+
+        if not params:
+            raise exc.HTTPBadRequest(explanation=_("Request is missing body."))
+
+        bool_params = ['writable', 'nondisruptive', 'preserve_snapshots']
+        mandatory_params = bool_params + ['host']
+
+        utils.check_params_exist(mandatory_params, params)
+        bool_param_values = utils.check_params_are_boolean(bool_params, params)
+
+        pool_was_specified = len(params['host'].split('#')) > 1
+
+        if pool_was_specified:
+            msg = _('The destination host can not contain pool information.')
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        new_share_network = None
+        new_share_network_id = params.get('new_share_network_id', None)
+        if new_share_network_id:
+            try:
+                new_share_network = db_api.share_network_get(
+                    context, new_share_network_id)
+            except exception.NotFound:
+                msg = _("Share network %s not "
+                        "found.") % new_share_network_id
+                raise exc.HTTPBadRequest(explanation=msg)
+
+        try:
+            result = self.share_api.share_server_migration_check(
+                context, share_server, params['host'],
+                bool_param_values['writable'],
+                bool_param_values['nondisruptive'],
+                bool_param_values['preserve_snapshots'],
+                new_share_network=new_share_network)
+        except exception.ServiceIsDown as e:
+            # NOTE(dviroel): user should check if the host is healthy
+            raise exc.HTTPBadRequest(explanation=e.msg)
+        except exception.InvalidShareServer as e:
+            # NOTE(dviroel): an invalid share server means that some internal
+            # resource has an invalid state.
+            raise exc.HTTPConflict(explanation=e.msg)
+
+        return self._migration_view_builder.build_check_migration(
+            req, params, result)
+
 
 def create_resource():
     return wsgi.Resource(ShareServerController())
diff --git a/manila/api/views/share_server_migration.py b/manila/api/views/share_server_migration.py
new file mode 100644
index 0000000000..999578bb58
--- /dev/null
+++ b/manila/api/views/share_server_migration.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2020 NetApp, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from manila.api import common
+
+
+class ViewBuilder(common.ViewBuilder):
+    """Model share server migration view data response as a python dictionary.
+ + """ + + _collection_name = 'share_server_migration' + _detail_version_modifiers = [] + + def get_progress(self, request, params): + """View of share server migration job progress.""" + result = { + 'total_progress': params['total_progress'], + 'task_state': params['task_state'], + 'destination_share_server_id': + params['destination_share_server_id'], + } + + self.update_versioned_resource_dict(request, result, params) + return result + + def build_check_migration(self, request, params, result): + """View of share server migration check.""" + requested_capabilities = { + 'writable': params['writable'], + 'nondisruptive': params['nondisruptive'], + 'preserve_snapshots': params['preserve_snapshots'], + 'share_network_id': params['new_share_network_id'], + 'host': params['host'], + } + supported_capabilities = { + 'writable': result['writable'], + 'nondisruptive': result['nondisruptive'], + 'preserve_snapshots': result['preserve_snapshots'], + 'share_network_id': result['share_network_id'], + 'migration_cancel': result['migration_cancel'], + 'migration_get_progress': result['migration_get_progress'] + } + view = { + 'compatible': result['compatible'], + 'requested_capabilities': requested_capabilities, + 'supported_capabilities': supported_capabilities, + } + capabilities = { + 'requested': copy.copy(params), + 'supported': copy.copy(result) + } + self.update_versioned_resource_dict(request, view, capabilities) + return view + + def migration_complete(self, request, params): + """View of share server migration complete command.""" + result = { + 'destination_share_server_id': + params['destination_share_server_id'], + } + + self.update_versioned_resource_dict(request, result, params) + return result diff --git a/manila/api/views/share_servers.py b/manila/api/views/share_servers.py index 2b7a59a238..30b2543855 100644 --- a/manila/api/views/share_servers.py +++ b/manila/api/views/share_servers.py @@ -22,7 +22,8 @@ class ViewBuilder(common.ViewBuilder): _collection_name = 'share_servers' _detail_version_modifiers = [ "add_is_auto_deletable_and_identifier_fields", - "add_share_network_subnet_id_field" + "add_share_network_subnet_id_field", + "add_task_state_and_source_server_fields" ] def build_share_server(self, request, share_server): @@ -74,3 +75,10 @@ class ViewBuilder(common.ViewBuilder): share_server_dict['is_auto_deletable'] = ( share_server['is_auto_deletable']) share_server_dict['identifier'] = share_server['identifier'] + + @common.ViewBuilder.versioned_method("2.57") + def add_task_state_and_source_server_fields( + self, context, share_server_dict, share_server): + share_server_dict['task_state'] = share_server['task_state'] + share_server_dict['source_share_server_id'] = ( + share_server['source_share_server_id']) diff --git a/manila/common/constants.py b/manila/common/constants.py index bfaaa86ea4..185d2a1091 100644 --- a/manila/common/constants.py +++ b/manila/common/constants.py @@ -62,6 +62,10 @@ STATUS_NEW = 'new' STATUS_OUT_OF_SYNC = 'out_of_sync' STATUS_ACTIVE = 'active' +# Share server migration statuses +STATUS_SERVER_MIGRATING = 'server_migrating' +STATUS_SERVER_MIGRATING_TO = 'server_migrating_to' + ACCESS_RULES_STATES = ( ACCESS_STATE_QUEUED_TO_APPLY, ACCESS_STATE_QUEUED_TO_DENY, @@ -71,16 +75,18 @@ ACCESS_RULES_STATES = ( ACCESS_STATE_ERROR, ACCESS_STATE_DELETED, ) - +# Share and share server migration task states TASK_STATE_MIGRATION_STARTING = 'migration_starting' TASK_STATE_MIGRATION_IN_PROGRESS = 'migration_in_progress' TASK_STATE_MIGRATION_COMPLETING = 
'migration_completing' TASK_STATE_MIGRATION_SUCCESS = 'migration_success' TASK_STATE_MIGRATION_ERROR = 'migration_error' TASK_STATE_MIGRATION_CANCELLED = 'migration_cancelled' +TASK_STATE_MIGRATION_CANCEL_IN_PROGRESS = 'migration_cancel_in_progress' TASK_STATE_MIGRATION_DRIVER_STARTING = 'migration_driver_starting' TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS = 'migration_driver_in_progress' TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE = 'migration_driver_phase1_done' +# Share statuses used by data service and host assisted migration TASK_STATE_DATA_COPYING_STARTING = 'data_copying_starting' TASK_STATE_DATA_COPYING_IN_PROGRESS = 'data_copying_in_progress' TASK_STATE_DATA_COPYING_COMPLETING = 'data_copying_completing' @@ -113,6 +119,7 @@ TRANSITIONAL_STATUSES = ( STATUS_EXTENDING, STATUS_SHRINKING, STATUS_MIGRATING, STATUS_MIGRATING_TO, STATUS_RESTORING, STATUS_REVERTING, + STATUS_SERVER_MIGRATING, STATUS_SERVER_MIGRATING_TO, ) INVALID_SHARE_INSTANCE_STATUSES_FOR_ACCESS_RULE_UPDATES = ( @@ -182,6 +189,33 @@ TASK_STATE_STATUSES = ( None, ) +SERVER_TASK_STATE_STATUSES = ( + TASK_STATE_MIGRATION_STARTING, + TASK_STATE_MIGRATION_IN_PROGRESS, + TASK_STATE_MIGRATION_COMPLETING, + TASK_STATE_MIGRATION_SUCCESS, + TASK_STATE_MIGRATION_ERROR, + TASK_STATE_MIGRATION_CANCEL_IN_PROGRESS, + TASK_STATE_MIGRATION_CANCELLED, + TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, + TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, + None, +) + +SHARE_SERVER_STATUSES = ( + STATUS_ACTIVE, + STATUS_ERROR, + STATUS_DELETING, + STATUS_CREATING, + STATUS_MANAGING, + STATUS_UNMANAGING, + STATUS_UNMANAGE_ERROR, + STATUS_MANAGE_ERROR, + STATUS_INACTIVE, + STATUS_SERVER_MIGRATING, + STATUS_SERVER_MIGRATING_TO, +) + REPLICA_STATE_ACTIVE = 'active' REPLICA_STATE_IN_SYNC = 'in_sync' REPLICA_STATE_OUT_OF_SYNC = 'out_of_sync' diff --git a/manila/db/api.py b/manila/db/api.py index 60bbc3ebe5..0fc8c9a70e 100644 --- a/manila/db/api.py +++ b/manila/db/api.py @@ -324,6 +324,21 @@ def share_instance_update(context, instance_id, values, with_share_data=False): with_share_data=with_share_data) +def share_and_snapshot_instances_status_update( + context, values, share_instance_ids=None, snapshot_instance_ids=None, + current_expected_status=None): + return IMPL.share_and_snapshot_instances_status_update( + context, values, share_instance_ids=share_instance_ids, + snapshot_instance_ids=snapshot_instance_ids, + current_expected_status=current_expected_status) + + +def share_instances_status_update(context, share_instance_ids, values): + """Updates the status of a bunch of share instances at once.""" + return IMPL.share_instances_status_update( + context, share_instance_ids, values) + + def share_instances_host_update(context, current_host, new_host): """Update the host attr of all share instances that are on current_host.""" return IMPL.share_instances_host_update(context, current_host, new_host) @@ -334,10 +349,11 @@ def share_instances_get_all(context, filters=None): return IMPL.share_instances_get_all(context, filters=filters) -def share_instances_get_all_by_share_server(context, share_server_id): +def share_instances_get_all_by_share_server(context, share_server_id, + with_share_data=False): """Returns all share instances with given share_server_id.""" - return IMPL.share_instances_get_all_by_share_server(context, - share_server_id) + return IMPL.share_instances_get_all_by_share_server( + context, share_server_id, with_share_data=with_share_data) def share_instances_get_all_by_host(context, host, with_share_data=False, @@ -517,6 +533,13 @@ def 
share_snapshot_instance_update(context, instance_id, values):
     return IMPL.share_snapshot_instance_update(context, instance_id, values)
 
 
+def share_snapshot_instances_status_update(
+        context, snapshot_instance_ids, values):
+    """Updates the status of a bunch of share snapshot instances at once."""
+    return IMPL.share_snapshot_instances_status_update(
+        context, snapshot_instance_ids, values)
+
+
 def share_snapshot_instance_create(context, snapshot_id, values):
     """Create a share snapshot instance for an existing snapshot."""
     return IMPL.share_snapshot_instance_create(
@@ -659,6 +682,13 @@ def share_snapshot_instance_export_location_create(context, values):
     return IMPL.share_snapshot_instance_export_location_create(context,
                                                                values)
 
 
+def share_snapshot_instance_export_locations_update(
+        context, share_snapshot_instance_id, export_locations, delete=True):
+    """Update export locations of a share snapshot instance."""
+    return IMPL.share_snapshot_instance_export_locations_update(
+        context, share_snapshot_instance_id, export_locations, delete=delete)
+
+
 def share_snapshot_instance_export_locations_get_all(
         context, share_snapshot_instance_id):
     """Get the share snapshot instance export locations for given id."""
@@ -974,9 +1004,14 @@ def share_server_get_all(context):
     return IMPL.share_server_get_all(context)
 
 
-def share_server_get_all_by_host(context, host):
+def share_server_get_all_with_filters(context, filters):
+    """Get all share servers that match the specified filters."""
+    return IMPL.share_server_get_all_with_filters(context, filters)
+
+
+def share_server_get_all_by_host(context, host, filters=None):
     """Get all share servers related to particular host."""
-    return IMPL.share_server_get_all_by_host(context, host)
+    return IMPL.share_server_get_all_by_host(context, host, filters=filters)
 
 
 def share_server_get_all_unused_deletable(context, host, updated_before):
diff --git a/manila/db/migrations/alembic/versions/5aa813ae673d_add_task_state_field_for_share_servers_.py b/manila/db/migrations/alembic/versions/5aa813ae673d_add_task_state_field_for_share_servers_.py
new file mode 100644
index 0000000000..0fb1b067bc
--- /dev/null
+++ b/manila/db/migrations/alembic/versions/5aa813ae673d_add_task_state_field_for_share_servers_.py
@@ -0,0 +1,64 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Add task_state field for share servers table
+
+Revision ID: 5aa813ae673d
+Revises: e6d88547b381
+Create Date: 2020-06-23 12:04:47.821793
+
+"""
+
+# revision identifiers, used by Alembic. 
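+# NOTE: in a deployment this revision is normally applied through the usual
+# ``manila-manage db sync`` workflow; running alembic directly
+# (``alembic upgrade 5aa813ae673d``) is the equivalent, assuming the
+# standard alembic environment shipped with manila.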
+revision = '5aa813ae673d' +down_revision = 'e6d88547b381' + +from alembic import op +from oslo_log import log +import sqlalchemy as sa + + +LOG = log.getLogger(__name__) + + +share_servers_fk_name = ( + "fk_share_servers_source_share_server_id") + + +def upgrade(): + + try: + op.add_column('share_servers', sa.Column( + 'task_state', sa.String(length=255), default=None)) + op.add_column( + 'share_servers', sa.Column( + 'source_share_server_id', sa.String(length=36), + sa.ForeignKey('share_servers.id', name=share_servers_fk_name), + default=None, + nullable=True)) + + except Exception: + LOG.error("Column share_servers.task_state and/or " + "share_server.source_share_server_id not created!") + raise + + +def downgrade(): + try: + op.drop_column('share_servers', 'task_state') + op.drop_constraint(share_servers_fk_name, 'share_servers', + type_='foreignkey') + op.drop_column('share_servers', 'source_share_server_id') + except Exception: + LOG.error("Column share_servers.task_state and/or " + "share_servers.source_share_server_id not dropped!") + raise diff --git a/manila/db/sqlalchemy/api.py b/manila/db/sqlalchemy/api.py index 09d98f1584..ab3b89753f 100644 --- a/manila/db/sqlalchemy/api.py +++ b/manila/db/sqlalchemy/api.py @@ -1425,6 +1425,68 @@ def share_instance_update(context, share_instance_id, values, return instance_ref +def share_and_snapshot_instances_status_update( + context, values, share_instance_ids=None, snapshot_instance_ids=None, + current_expected_status=None): + updated_share_instances = None + updated_snapshot_instances = None + session = get_session() + with session.begin(): + if current_expected_status and share_instance_ids: + filters = {'instance_ids': share_instance_ids} + share_instances = share_instances_get_all( + context, filters=filters, session=session) + all_instances_are_compliant = all( + instance['status'] == current_expected_status + for instance in share_instances) + + if not all_instances_are_compliant: + msg = _('At least one of the shares is not in the %(status)s ' + 'status.') % { + 'status': current_expected_status + } + raise exception.InvalidShareInstance(reason=msg) + + if current_expected_status and snapshot_instance_ids: + filters = {'instance_ids': snapshot_instance_ids} + snapshot_instances = share_snapshot_instance_get_all_with_filters( + context, filters, session=session) + all_snap_instances_are_compliant = all( + snap_instance['status'] == current_expected_status + for snap_instance in snapshot_instances) + if not all_snap_instances_are_compliant: + msg = _('At least one of the snapshots is not in the ' + '%(status)s status.') % { + 'status': current_expected_status + } + raise exception.InvalidShareSnapshotInstance(reason=msg) + + if share_instance_ids: + updated_share_instances = share_instances_status_update( + context, share_instance_ids, values, session=session) + + if snapshot_instance_ids: + updated_snapshot_instances = ( + share_snapshot_instances_status_update( + context, snapshot_instance_ids, values, session=session)) + + return updated_share_instances, updated_snapshot_instances + + +@require_context +def share_instances_status_update( + context, share_instance_ids, values, session=None): + session = session or get_session() + + result = ( + model_query( + context, models.ShareInstance, read_deleted="no", + session=session).filter( + models.ShareInstance.id.in_(share_instance_ids)).update( + values, synchronize_session=False)) + return result + + def _share_instance_update(context, share_instance_id, values, session): 
share_instance_ref = share_instance_get(context, share_instance_id, session=session) @@ -1457,8 +1519,8 @@ def share_instance_get(context, share_instance_id, session=None, @require_admin_context -def share_instances_get_all(context, filters=None): - session = get_session() +def share_instances_get_all(context, filters=None, session=None): + session = session or get_session() query = model_query( context, models.ShareInstance, session=session, read_deleted="no", ).options( @@ -1483,6 +1545,10 @@ def share_instances_get_all(context, filters=None): models.ShareInstanceExportLocations.uuid == export_location_id) + instance_ids = filters.get('instance_ids') + if instance_ids: + query = query.filter(models.ShareInstance.id.in_(instance_ids)) + # Returns list of share instances that satisfy filters. query = query.all() return query @@ -1612,13 +1678,19 @@ def share_instances_get_all_by_share_network(context, share_network_id): @require_context -def share_instances_get_all_by_share_server(context, share_server_id): +def share_instances_get_all_by_share_server(context, share_server_id, + with_share_data=False): """Returns list of share instance with given share server.""" + session = get_session() result = ( model_query(context, models.ShareInstance).filter( models.ShareInstance.share_server_id == share_server_id, ).all() ) + + if with_share_data: + result = _set_instances_share_data(context, result, session) + return result @@ -2738,6 +2810,21 @@ def share_snapshot_update(context, snapshot_id, values): return snapshot_ref + +@require_context +def share_snapshot_instances_status_update( + context, snapshot_instance_ids, values, session=None): + session = session or get_session() + + result = ( + model_query( + context, models.ShareSnapshotInstance, + read_deleted="no", session=session).filter( + models.ShareSnapshotInstance.id.in_(snapshot_instance_ids) + ).update(values, synchronize_session=False)) + + return result + ################################# @@ -2974,9 +3061,10 @@ def share_snapshot_export_locations_get(context, snapshot_id): @require_context def share_snapshot_instance_export_locations_get_all( - context, share_snapshot_instance_id): + context, share_snapshot_instance_id, session=None): - session = get_session() + if not session: + session = get_session() export_locations = _share_snapshot_instance_export_locations_get_query( context, session, {'share_snapshot_instance_id': share_snapshot_instance_id}).all() @@ -3009,6 +3097,82 @@ def share_snapshot_instance_export_location_delete(context, el_id): el.soft_delete(session=session) + +@require_context +@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) +def share_snapshot_instance_export_locations_update( + context, share_snapshot_instance_id, export_locations, delete): + # NOTE(dviroel): Lets keep this backward compatibility for driver that + # may still return export_locations as string + if not isinstance(export_locations, (list, tuple, set)): + export_locations = (export_locations, ) + export_locations_as_dicts = [] + for el in export_locations: + export_location = el + if isinstance(el, six.string_types): + export_location = { + "path": el, + "is_admin_only": False, + } + elif not isinstance(export_location, dict): + raise exception.ManilaException( + _("Wrong export location type '%s'.") % type(export_location)) + export_locations_as_dicts.append(export_location) + export_locations = export_locations_as_dicts + + export_locations_paths = [el['path'] for el in export_locations] + + session = get_session() + + 
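+    # Read the existing rows within this same session so that the updates
+    # and soft-deletes below stay consistent with the entries added later.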
current_el_rows = share_snapshot_instance_export_locations_get_all( + context, share_snapshot_instance_id, session=session) + + def get_path_list_from_rows(rows): + return set([row['path'] for row in rows]) + + current_el_paths = get_path_list_from_rows(current_el_rows) + + def create_indexed_time_dict(key_list): + base = timeutils.utcnow() + return { + # NOTE(u_glide): Incrementing timestamp by microseconds to make + # timestamp order match index order. + key: base + datetime.timedelta(microseconds=index) + for index, key in enumerate(key_list) + } + + indexed_update_time = create_indexed_time_dict(export_locations_paths) + + for el in current_el_rows: + if delete and el['path'] not in export_locations_paths: + el.soft_delete(session) + else: + updated_at = indexed_update_time[el['path']] + el.update({ + 'updated_at': updated_at, + }) + el.save(session=session) + + # Now add new export locations + for el in export_locations: + if el['path'] in current_el_paths: + # Already updated + continue + + location_ref = models.ShareSnapshotInstanceExportLocation() + location_ref.update({ + 'id': uuidutils.generate_uuid(), + 'path': el['path'], + 'share_snapshot_instance_id': share_snapshot_instance_id, + 'updated_at': indexed_update_time[el['path']], + 'is_admin_only': el.get('is_admin_only', False), + }) + location_ref.save(session=session) + + return get_path_list_from_rows( + share_snapshot_instance_export_locations_get_all( + context, share_snapshot_instance_id, session=session)) + ################################# @@ -3816,8 +3980,28 @@ def share_server_get_all(context): @require_context -def share_server_get_all_by_host(context, host): - return _server_get_query(context).filter_by(host=host).all() +def share_server_get_all_with_filters(context, filters): + + query = _server_get_query(context) + + if filters.get('host'): + query = query.filter_by(host=filters.get('host')) + if filters.get('status'): + query = query.filter_by(status=filters.get('status')) + if filters.get('source_share_server_id'): + query = query.filter_by( + source_share_server_id=filters.get('source_share_server_id')) + + return query.all() + + +@require_context +def share_server_get_all_by_host(context, host, filters=None): + if filters: + filters.update({'host': host}) + else: + filters = {'host': host} + return share_server_get_all_with_filters(context, filters=filters) @require_context diff --git a/manila/db/sqlalchemy/models.py b/manila/db/sqlalchemy/models.py index 306f3ba1a6..8621772648 100644 --- a/manila/db/sqlalchemy/models.py +++ b/manila/db/sqlalchemy/models.py @@ -264,8 +264,10 @@ class Share(BASE, ManilaBase): if len(self.instances) > 0: order = (constants.STATUS_REVERTING, constants.STATUS_REPLICATION_CHANGE, - constants.STATUS_MIGRATING, constants.STATUS_AVAILABLE, - constants.STATUS_ERROR) + constants.STATUS_MIGRATING, + constants.STATUS_SERVER_MIGRATING, + constants.STATUS_AVAILABLE, + constants.STATUS_ERROR, ) other_statuses = ( [x['status'] for x in self.instances if x['status'] not in order and @@ -671,8 +673,9 @@ class ShareSnapshot(BASE, ManilaBase): lambda x: qualified_replica(x.share_instance), self.instances)) migrating_snapshots = list(filter( - lambda x: x.share_instance['status'] == - constants.STATUS_MIGRATING, self.instances)) + lambda x: x.share_instance['status'] in ( + constants.STATUS_MIGRATING, + constants.STATUS_SERVER_MIGRATING), self.instances)) snapshot_instances = (replica_snapshots or migrating_snapshots or self.instances) @@ -704,6 +707,7 @@ class ShareSnapshot(BASE, ManilaBase): 
order = (constants.STATUS_DELETING, constants.STATUS_CREATING,
                  constants.STATUS_ERROR, constants.STATUS_MIGRATING,
+                 constants.STATUS_SERVER_MIGRATING,
                  constants.STATUS_AVAILABLE)
         other_statuses = [x['status'] for x in self.instances
                           if x['status'] not in order]
@@ -1006,12 +1010,17 @@ class ShareServer(BASE, ManilaBase):
     host = Column(String(255), nullable=False)
     is_auto_deletable = Column(Boolean, default=True)
     identifier = Column(String(255), nullable=True)
+    task_state = Column(String(255), nullable=True)
+    source_share_server_id = Column(String(36), ForeignKey('share_servers.id'),
+                                    nullable=True)
     status = Column(Enum(
         constants.STATUS_INACTIVE, constants.STATUS_ACTIVE,
         constants.STATUS_ERROR, constants.STATUS_DELETING,
         constants.STATUS_CREATING, constants.STATUS_DELETED,
         constants.STATUS_MANAGING, constants.STATUS_UNMANAGING,
-        constants.STATUS_UNMANAGE_ERROR, constants.STATUS_MANAGE_ERROR),
+        constants.STATUS_UNMANAGE_ERROR, constants.STATUS_MANAGE_ERROR,
+        constants.STATUS_SERVER_MIGRATING,
+        constants.STATUS_SERVER_MIGRATING_TO),
         default=constants.STATUS_INACTIVE)
     network_allocations = orm.relationship(
         "NetworkAllocation",
diff --git a/manila/exception.py b/manila/exception.py
index bfbf2b0fe5..ee9246d574 100644
--- a/manila/exception.py
+++ b/manila/exception.py
@@ -246,8 +246,16 @@ class ShareServerInUse(InUse):
     message = _("Share server %(share_server_id)s is in use.")
 
 
+class ShareServerMigrationError(ManilaException):
+    message = _("Error in share server migration: %(reason)s")
+
+
+class ShareServerMigrationFailed(ManilaException):
+    message = _("Share server migration failed: %(reason)s")
+
+
 class InvalidShareServer(Invalid):
-    message = _("Share server %(share_server_id)s is not valid.")
+    message = _("Invalid share server: %(reason)s")
 
 
 class ShareMigrationError(ManilaException):
diff --git a/manila/policies/share_server.py b/manila/policies/share_server.py
index 66c0c51494..cbdf77f615 100644
--- a/manila/policies/share_server.py
+++ b/manila/policies/share_server.py
@@ -93,6 +93,68 @@ share_server_policies = [
             'path': '/share-servers/{share_server_id}/action'
         }
     ]),
+    policy.DocumentedRuleDefault(
+        name=BASE_POLICY_NAME % 'share_server_migration_start',
+        check_str=base.RULE_ADMIN_API,
+        description="Migrates a share server to the specified host.",
+        operations=[
+            {
+                'method': 'POST',
+                'path': '/share-servers/{share_server_id}/action',
+            }
+        ]),
+    policy.DocumentedRuleDefault(
+        name=BASE_POLICY_NAME % 'share_server_migration_check',
+        check_str=base.RULE_ADMIN_API,
+        description="Checks whether a share server can be migrated to the "
+                    "specified host.",
+        operations=[
+            {
+                'method': 'POST',
+                'path': '/share-servers/{share_server_id}/action',
+            }
+        ]),
+    policy.DocumentedRuleDefault(
+        name=BASE_POLICY_NAME % 'share_server_migration_complete',
+        check_str=base.RULE_ADMIN_API,
+        description="Invokes the 2nd phase of share server migration.",
+        operations=[
+            {
+                'method': 'POST',
+                'path': '/share-servers/{share_server_id}/action',
+            }
+        ]),
+    policy.DocumentedRuleDefault(
+        name=BASE_POLICY_NAME % 'share_server_migration_cancel',
+        check_str=base.RULE_ADMIN_API,
+        description="Attempts to cancel share server migration.",
+        operations=[
+            {
+                'method': 'POST',
+                'path': '/share-servers/{share_server_id}/action',
+            }
+        ]),
+    policy.DocumentedRuleDefault(
+        name=BASE_POLICY_NAME % 'share_server_migration_get_progress',
+        check_str=base.RULE_ADMIN_API,
+        description=("Retrieves the share server migration progress for a "
+                     "given share server."),
+        operations=[
+            {
+                'method': 
'POST', + 'path': '/share-servers/{share_server_id}/action', + } + ]), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'share_server_reset_task_state', + check_str=base.RULE_ADMIN_API, + description=("Resets task state."), + operations=[ + { + 'method': 'POST', + 'path': '/share-servers/{share_server_id}/action', + } + ]), ] diff --git a/manila/share/api.py b/manila/share/api.py index 294053f4be..e0464a89cb 100644 --- a/manila/share/api.py +++ b/manila/share/api.py @@ -805,8 +805,8 @@ class API(base.Base): raise exception.InvalidInput(reason=msg) if share_server['status'] != constants.STATUS_ACTIVE: - msg = _("Share Server specified is not active.") - raise exception.InvalidShareServer(message=msg) + msg = _("The provided share server is not active.") + raise exception.InvalidShareServer(reason=msg) subnet = self.db.share_network_subnet_get( context, share_server['share_network_subnet_id']) share_data['share_network_id'] = subnet['share_network_id'] @@ -850,7 +850,9 @@ class API(base.Base): if share is None: share = {'instance': {}} - share_instance = share['instance'] + # NOTE(dviroel): The share object can be a share instance object with + # share data. + share_instance = share.get('instance', share) share_properties = { 'size': kwargs.get('size', share.get('size')), @@ -1610,6 +1612,7 @@ class API(base.Base): if task_state in (constants.TASK_STATE_MIGRATION_SUCCESS, constants.TASK_STATE_DATA_COPYING_ERROR, constants.TASK_STATE_MIGRATION_CANCELLED, + constants.TASK_STATE_MIGRATION_CANCEL_IN_PROGRESS, constants.TASK_STATE_MIGRATION_COMPLETING, constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, constants.TASK_STATE_DATA_COPYING_COMPLETED, @@ -1625,22 +1628,30 @@ class API(base.Base): else: return None - def _migration_validate_error_message(self, share): - - task_state = share['task_state'] + def _migration_validate_error_message(self, resource, + resource_type='share'): + task_state = resource['task_state'] if task_state == constants.TASK_STATE_MIGRATION_SUCCESS: - msg = _("Migration of share %s has already " - "completed.") % share['id'] + msg = _("Migration of %(resource_type)s %(resource_id)s has " + "already completed.") % { + 'resource_id': resource['id'], + 'resource_type': resource_type} elif task_state in (None, constants.TASK_STATE_MIGRATION_ERROR): - msg = _("There is no migration being performed for share %s " - "at this moment.") % share['id'] + msg = _("There is no migration being performed for " + "%(resource_type)s %(resource_id)s at this moment.") % { + 'resource_id': resource['id'], + 'resource_type': resource_type} elif task_state == constants.TASK_STATE_MIGRATION_CANCELLED: - msg = _("Migration of share %s was already " - "cancelled.") % share['id'] + msg = _("Migration of %(resource_type)s %(resource_id)s was " + "already cancelled.") % { + 'resource_id': resource['id'], + 'resource_type': resource_type} elif task_state in (constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, constants.TASK_STATE_DATA_COPYING_COMPLETED): - msg = _("Migration of share %s has already completed first " - "phase.") % share['id'] + msg = _("Migration of %(resource_type)s %(resource_id)s has " + "already completed first phase.") % { + 'resource_id': resource['id'], + 'resource_type': resource_type} else: return None return msg @@ -2234,3 +2245,401 @@ class API(base.Base): def snapshot_export_location_get(self, context, el_id): return self.db.share_snapshot_instance_export_location_get(context, el_id) + + def share_server_migration_get_destination(self, context, source_server_id, + 
status=None): + filters = {'source_share_server_id': source_server_id} + if status: + filters.update({'status': status}) + + dest_share_servers = self.db.share_server_get_all_with_filters( + context, filters=filters) + if not dest_share_servers: + msg = _("A destination share server wasn't found for source " + "share server %s.") % source_server_id + raise exception.InvalidShareServer(reason=msg) + if len(dest_share_servers) > 1: + msg = _("More than one destination share server was found for " + "source share server %s. Aborting...") % source_server_id + raise exception.InvalidShareServer(reason=msg) + + return dest_share_servers[0] + + def get_share_server_migration_request_spec_dict( + self, context, share_instances, snapshot_instances, **kwargs): + """Returns request specs related to share server and all its shares.""" + + shares_total_size = sum([instance.get('size', 0) + for instance in share_instances]) + snapshots_total_size = sum([instance.get('size', 0) + for instance in snapshot_instances]) + + shares_req_spec = [] + for share_instance in share_instances: + share_type_id = share_instance['share_type_id'] + share_type = share_types.get_share_type(context, share_type_id) + req_spec = self._get_request_spec_dict(share_instance, + share_type, + **kwargs) + shares_req_spec.append(req_spec) + + server_request_spec = { + 'shares_size': shares_total_size, + 'snapshots_size': snapshots_total_size, + 'shares_req_spec': shares_req_spec, + } + return server_request_spec + + def _migration_initial_checks(self, context, share_server, dest_host, + new_share_network): + shares = self.db.share_get_all_by_share_server( + context, share_server['id']) + + if len(shares) == 0: + msg = _("Share server %s does not have shares." + % share_server['id']) + raise exception.InvalidShareServer(reason=msg) + + # We only handle "active" share servers for now + if share_server['status'] != constants.STATUS_ACTIVE: + msg = _('Share server %(server_id)s status must be active, ' + 'but current status is: %(server_status)s.') % { + 'server_id': share_server['id'], + 'server_status': share_server['status']} + raise exception.InvalidShareServer(reason=msg) + + share_groups_related_to_share_server = ( + self.db.share_group_get_all_by_share_server( + context, share_server['id'])) + + if share_groups_related_to_share_server: + msg = _("The share server %s can not be migrated because it is " + "related to a share group.") % share_server['id'] + raise exception.InvalidShareServer(reason=msg) + + # Same backend and same network, nothing changes + src_backend = share_utils.extract_host(share_server['host'], + level='backend_name') + dest_backend = share_utils.extract_host(dest_host, + level='backend_name') + current_share_network_id = shares[0]['instance']['share_network_id'] + if (src_backend == dest_backend and + (new_share_network is None or + new_share_network['id'] == current_share_network_id)): + msg = _('There is no difference between source and destination ' + 'backends and between source and destination share ' + 'networks. Share server migration will not proceed.') + raise exception.InvalidShareServer(reason=msg) + + filters = {'source_share_server_id': share_server['id'], + 'status': constants.STATUS_SERVER_MIGRATING_TO} + dest_share_servers = self.db.share_server_get_all_with_filters( + context, filters=filters) + if len(dest_share_servers): + msg = _("There is at least one destination share server pointing " + "to this source share server. 
Clean up your environment " + "before starting a new migration.") + raise exception.InvalidShareServer(reason=msg) + + dest_service_host = share_utils.extract_host(dest_host) + # Make sure the host is in the list of available hosts + utils.validate_service_host(context, dest_service_host) + + service = self.db.service_get_by_args( + context, dest_service_host, 'manila-share') + + # Get all share types + type_ids = set([share['instance']['share_type_id'] + for share in shares]) + types = [share_types.get_share_type(context, type_id) + for type_id in type_ids] + + # Check if share type azs are supported by the destination host + for share_type in types: + azs = share_type['extra_specs'].get('availability_zones', '') + if azs and service['availability_zone']['name'] not in azs: + msg = _("Share server %(server)s cannot be migrated to host " + "%(dest)s because the share type %(type)s is used by " + "one of the shares, and this share type is not " + "supported within the availability zone (%(az)s) that " + "the host is in.") + type_name = '%s' % (share_type['name'] or '') + type_id = '(ID: %s)' % share_type['id'] + payload = {'type': '%s%s' % (type_name, type_id), + 'az': service['availability_zone']['name'], + 'server': share_server['id'], + 'dest': dest_host} + raise exception.InvalidShareServer(reason=msg % payload) + + if new_share_network: + new_share_network_id = new_share_network['id'] + else: + new_share_network_id = shares[0]['instance']['share_network_id'] + # NOTE(carloss): check if the new or old share network has a subnet + # that spans the availability zone of the destination host, otherwise + # we should deny this operation. + dest_az = self.db.availability_zone_get( + context, service['availability_zone']['name']) + compatible_subnet = ( + self.db.share_network_subnet_get_by_availability_zone_id( + context, new_share_network_id, dest_az['id'])) + + if not compatible_subnet: + msg = _("The share network %(network)s does not have a subnet " + "that spans the destination host availability zone.") + payload = {'network': new_share_network_id} + raise exception.InvalidShareServer(reason=msg % payload) + + # NOTE(carloss): Refreshing the list of shares since something could've + # changed from the initial list. + shares = self.db.share_get_all_by_share_server( + context, share_server['id']) + for share in shares: + if share['status'] != constants.STATUS_AVAILABLE: + msg = _('Share %(share_id)s status must be available, ' + 'but current status is: %(share_status)s.') % { + 'share_id': share['id'], + 'share_status': share['status']} + raise exception.InvalidShareServer(reason=msg) + + if share.has_replicas: + msg = _('Share %s has replicas. Remove the replicas of all ' + 'shares in the share server before attempting to ' + 'migrate it.') % share['id'] + LOG.error(msg) + raise exception.InvalidShareServer(reason=msg) + + # NOTE(carloss): Not validating the flag preserve_snapshots at this + # point, considering that even if the admin set the value to False, + # the driver can still support preserving snapshots and the + # snapshots would be copied anyway. So the share/manager will be + # responsible for checking if the driver does not support snapshot + # preservation, and if there are snapshots in the share server. 
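+            # Even with preserve_snapshots unset, every snapshot must already
+            # be 'available'; the check below blocks the migration otherwise.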
+            share_snapshots = self.db.share_snapshot_get_all_for_share(
+                context, share['id'])
+            all_snapshots_are_available = all(
+                [snapshot['status'] == constants.STATUS_AVAILABLE
+                 for snapshot in share_snapshots])
+            if not all_snapshots_are_available:
+                msg = _(
+                    "All snapshots must have '%(status)s' status to be "
+                    "migrated by the driver along with share "
+                    "%(resource_id)s.") % {
+                        'resource_id': share['id'],
+                        'status': constants.STATUS_AVAILABLE,
+                }
+                LOG.error(msg)
+                raise exception.InvalidShareServer(reason=msg)
+
+            if share.get('share_group_id'):
+                msg = _('Share %s is a member of a group. This operation is '
+                        'not currently supported for share servers that '
+                        'contain shares that are members of groups.'
+                        ) % share['id']
+                LOG.error(msg)
+                raise exception.InvalidShareServer(reason=msg)
+
+            share_instance = share['instance']
+            # Access rules status must not be error
+            if share_instance['access_rules_status'] == constants.STATUS_ERROR:
+                msg = _(
+                    'Share instance %(instance_id)s access rules status must '
+                    'not be in %(error)s when attempting to start a share '
+                    'server migration.') % {
+                        'instance_id': share_instance['id'],
+                        'error': constants.STATUS_ERROR}
+                raise exception.InvalidShareServer(reason=msg)
+            try:
+                self._check_is_share_busy(share)
+            except exception.ShareBusyException as e:
+                raise exception.InvalidShareServer(reason=e.msg)
+
+        return shares, types, service, new_share_network_id
+
+    def share_server_migration_check(self, context, share_server, dest_host,
+                                     writable, nondisruptive,
+                                     preserve_snapshots,
+                                     new_share_network=None):
+        """Checks whether a share server can be migrated to a new host."""
+        shares, types, service, new_share_network_id = (
+            self._migration_initial_checks(context, share_server, dest_host,
+                                           new_share_network))
+
+        # NOTE(dviroel): Service is up according to validations made on initial
+        # checks
+        result = self.share_rpcapi.share_server_migration_check(
+            context, share_server['id'], dest_host, writable, nondisruptive,
+            preserve_snapshots, new_share_network_id)
+
+        return result
+
+    def share_server_migration_start(
+            self, context, share_server, dest_host, writable, nondisruptive,
+            preserve_snapshots, new_share_network=None):
+        """Migrates a share server to a new host."""
+
+        shares, types, dest_service, new_share_network_id = (
+            self._migration_initial_checks(context, share_server,
+                                           dest_host,
+                                           new_share_network))
+
+        # Updates the share server status to migration starting
+        self.db.share_server_update(
+            context, share_server['id'],
+            {'task_state': constants.TASK_STATE_MIGRATION_STARTING,
+             'status': constants.STATUS_SERVER_MIGRATING})
+
+        share_snapshots = [
+            self.db.share_snapshot_get_all_for_share(context, share['id'])
+            for share in shares]
+        snapshot_instance_ids = []
+        for snapshot_list in share_snapshots:
+            for snapshot in snapshot_list:
+                snapshot_instance_ids.append(snapshot['instance']['id'])
+        share_instance_ids = [share['instance']['id'] for share in shares]
+
+        # Updates all shares and snapshot instances
+        self.db.share_and_snapshot_instances_status_update(
+            context, {'status': constants.STATUS_SERVER_MIGRATING},
+            share_instance_ids=share_instance_ids,
+            snapshot_instance_ids=snapshot_instance_ids,
+            current_expected_status=constants.STATUS_AVAILABLE
+        )
+
+        # NOTE(dviroel): Service is up according to validations made on initial
+        # checks
+        self.share_rpcapi.share_server_migration_start(
+            context, share_server, dest_host, writable, nondisruptive,
+            preserve_snapshots, new_share_network_id)
+
+    def share_server_migration_complete(self, context, share_server):
share_server): + """Invokes 2nd phase of share server migration.""" + if share_server['status'] != constants.STATUS_SERVER_MIGRATING: + msg = _("Share server %s is not migrating") % share_server['id'] + LOG.error(msg) + raise exception.InvalidShareServer(reason=msg) + if (share_server['task_state'] != + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE): + msg = _("The first phase of migration has to finish to " + "request the completion of server %s's " + "migration.") % share_server['id'] + LOG.error(msg) + raise exception.InvalidShareServer(reason=msg) + + dest_share_server = self.share_server_migration_get_destination( + context, share_server['id'], + status=constants.STATUS_SERVER_MIGRATING_TO + ) + + dest_host = share_utils.extract_host(dest_share_server['host']) + utils.validate_service_host(context, dest_host) + + self.share_rpcapi.share_server_migration_complete( + context, dest_share_server['host'], share_server, + dest_share_server) + + return { + 'destination_share_server_id': dest_share_server['id'] + } + + def share_server_migration_cancel(self, context, share_server): + """Attempts to cancel share server migration.""" + if share_server['status'] != constants.STATUS_SERVER_MIGRATING: + msg = _("Migration of share server %s cannot be cancelled because " + "the provided share server is not being migrated.") + LOG.error(msg) + raise exception.InvalidShareServer(reason=msg) + + if share_server['task_state'] in ( + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, + constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): + + dest_share_server = self.share_server_migration_get_destination( + context, share_server['id'], + status=constants.STATUS_SERVER_MIGRATING_TO + ) + + dest_host = share_utils.extract_host(dest_share_server['host']) + utils.validate_service_host(context, dest_host) + + self.share_rpcapi.share_server_migration_cancel( + context, dest_share_server['host'], share_server, + dest_share_server) + else: + msg = self._migration_validate_error_message( + share_server, resource_type='share_server') + if msg is None: + msg = _("Migration of share server %s can be cancelled only " + "after the driver already started the migration, or " + "when the first phase of the migration gets " + "completed.") % share_server['id'] + LOG.error(msg) + raise exception.InvalidShareServer(reason=msg) + + def share_server_migration_get_progress(self, context, + src_share_server_id): + """Retrieve migration progress for a given share server.""" + try: + share_server = self.db.share_server_get(context, + src_share_server_id) + except exception.ShareServerNotFound: + msg = _('Share server %s was not found. We will search for a ' + 'successful migration') % src_share_server_id + LOG.debug(msg) + # Search for a successful migration, raise an error if not found + dest_share_server = self.share_server_migration_get_destination( + context, src_share_server_id, + status=constants.STATUS_ACTIVE + ) + return { + 'total_progress': 100, + 'destination_share_server_id': dest_share_server['id'], + 'task_state': dest_share_server['task_state'], + } + # Source server still exists so it must be in 'server_migrating' status + if (share_server and + share_server['status'] != constants.STATUS_SERVER_MIGRATING): + msg = _("Migration progress of share server %s cannot be " + "obtained. 
The provided share server is not being "
+                    "migrated.") % share_server['id']
+            LOG.error(msg)
+            raise exception.InvalidShareServer(reason=msg)
+
+        dest_share_server = self.share_server_migration_get_destination(
+            context, share_server['id'],
+            status=constants.STATUS_SERVER_MIGRATING_TO
+        )
+
+        if (share_server['task_state'] ==
+                constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
+
+            dest_host = share_utils.extract_host(dest_share_server['host'])
+            utils.validate_service_host(context, dest_host)
+
+            try:
+                result = (
+                    self.share_rpcapi.share_server_migration_get_progress(
+                        context, dest_share_server['host'],
+                        share_server, dest_share_server))
+            except Exception:
+                msg = _("Failed to obtain migration progress of share "
+                        "server %s.") % share_server['id']
+                LOG.exception(msg)
+                raise exception.ShareServerMigrationError(reason=msg)
+
+        else:
+            result = self._migration_get_progress_state(share_server)
+
+        if not (result and result.get('total_progress') is not None):
+            msg = self._migration_validate_error_message(
+                share_server, resource_type='share_server')
+            if msg is None:
+                msg = _("Migration progress of share server %s cannot be "
+                        "obtained at this moment.") % share_server['id']
+            LOG.error(msg)
+            raise exception.InvalidShareServer(reason=msg)
+
+        result.update({
+            'destination_share_server_id': dest_share_server['id'],
+            'task_state': dest_share_server['task_state']
+        })
+        return result
diff --git a/manila/share/driver.py b/manila/share/driver.py
index ca60a33063..dbd9e55e34 100644
--- a/manila/share/driver.py
+++ b/manila/share/driver.py
@@ -2861,3 +2861,298 @@ class ShareDriver(object):
            to 'error'.
         """
         raise NotImplementedError()
+
+    def share_server_migration_start(self, context, src_share_server,
+                                     dest_share_server, shares, snapshots):
+        """Starts migration of a given share server to another host.
+
+        .. note::
+            Is called on the destination share server's backend to start
+            the migration.
+
+        Driver should implement this method if willing to perform a server
+        migration in a driver-assisted way, which is useful when the source
+        share server's backend driver is compatible with the destination
+        backend driver. This method should start the migration procedure in
+        the backend and return immediately.
+        The following steps should be done in
+        'share_server_migration_continue'.
+
+        :param context: The 'context.RequestContext' object for the request.
+        :param src_share_server: Reference to the original share server.
+        :param dest_share_server: Reference to the share server to be used
+            as destination.
+        :param shares: All shares in the source share server that should be
+            migrated.
+        :param snapshots: All snapshots in the source share server that
+            should be migrated.
+        """
+        raise NotImplementedError()
+
+    def share_server_migration_continue(self, context, src_share_server,
+                                        dest_share_server, shares,
+                                        snapshots):
+        """Continues migration of a given share server to another host.
+
+        .. note::
+            Is called on the destination share server's backend to continue
+            the migration.
+
+        Driver should implement this method to continue monitoring the
+        migration progress in the storage backend and perform the following
+        steps until the 1st phase is completed.
+
+        :param context: The 'context.RequestContext' object for the request.
+        :param src_share_server: Reference to the original share server.
+        :param dest_share_server: Reference to the share server to be used
+            as destination.
+        :param shares: All shares in the source share server that should be
+            migrated.
+        :param snapshots: All snapshots in the source share server that
+            should be migrated.
+        :return: Boolean value to indicate if the 1st phase is finished.
+        """
+        raise NotImplementedError()
+
+    def share_server_migration_get_progress(self, context, src_share_server,
+                                            dest_share_server, shares,
+                                            snapshots):
+        """Obtains progress of migration of a share server to another host.
+
+        .. note::
+            Is called on the destination share server's backend to obtain
+            migration progress.
+
+        If possible, driver can implement a way to return migration progress
+        information.
+
+        :param context: The 'context.RequestContext' object for the request.
+        :param src_share_server: Reference to the original share server.
+        :param dest_share_server: Reference to the share server to be used
+            as destination.
+        :param shares: All shares in the source share server that should be
+            migrated.
+        :param snapshots: All snapshots in the source share server that
+            should be migrated.
+        :return: A dictionary with at least a 'total_progress' field
+            containing the percentage value.
+        """
+        raise NotImplementedError()
+
+    def share_server_migration_cancel(self, context, src_share_server,
+                                      dest_share_server, shares, snapshots):
+        """Cancels migration of a given share server to another host.
+
+        .. note::
+            Is called on the destination share server's backend to cancel
+            the migration.
+
+        If possible, driver can implement a way to cancel an in-progress
+        migration.
+
+        :param context: The 'context.RequestContext' object for the request.
+        :param src_share_server: Reference to the original share server.
+        :param dest_share_server: Reference to the share server to be used
+            as destination.
+        :param shares: All shares in the source share server that should be
+            migrated.
+        :param snapshots: All snapshots in the source share server that
+            should be migrated.
+        """
+        raise NotImplementedError()
+
+    def share_server_migration_check_compatibility(
+            self, context, share_server, dest_host, old_share_network,
+            new_share_network, shares_request_spec):
+        """Checks destination compatibility for migration of a share server.
+
+        .. note::
+            Is called on the destination share server's backend to check
+            migration compatibility. Can be called by an admin to check if
+            a given host is compatible or by the share manager to test
+            compatibility with the destination backend.
+
+        Driver should check if it is compatible with the destination
+        backend so driver-assisted migration can proceed.
+
+        :param context: The 'context.RequestContext' object for the request.
+        :param share_server: Share server model.
+        :param dest_host: Reference to the host to be used by the migrated
+            share server.
+        :param old_share_network: Share network model where the source share
+            server is placed.
+        :param new_share_network: Share network model where the share
+            server is going to be migrated to.
+        :param shares_request_spec: Dict. Contains information about all
+            shares and share types that belong to the source share server.
+            The drivers can use this information to check if the
+            capabilities match with the destination backend and if there is
+            available space to hold the new share server and all its
+            resources.
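+            As an illustration only, a minimal compatibility check could
+            compare 'shares_size' plus 'snapshots_size' from this spec
+            against the free capacity reported by the destination backend.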
+
+        Example::
+
+            {
+                'shares_size': 100,
+                'snapshots_size': 100,
+                'shares_req_spec':
+                [
+                    {
+                        'share_properties':
+                        {
+                            'size': 10,
+                            'user_id': '2f5c1df4-5203-444e-b68e-1e60f3f26fc3',
+                            'project_id':
+                                '0b82b278-51d6-4357-b273-0d7263982c31',
+                            'snapshot_support': True,
+                            'create_share_from_snapshot_support': True,
+                            'revert_to_snapshot_support': False,
+                            'mount_snapshot_support': False,
+                            'share_proto': 'NFS',
+                            'share_type_id':
+                                '360e01c1-a4f7-4782-9676-dc013f1a2f21',
+                            'is_public': False,
+                            'share_group_id': None,
+                            'source_share_group_snapshot_member_id': None,
+                            'snapshot_id': None,
+                        },
+                        'share_instance_properties':
+                        {
+                            'availability_zone_id':
+                                '02377ad7-381c-4b25-a04c-6fd218f22a91',
+                            'share_network_id':
+                                '691544aa-da83-4669-8522-22719f236e16',
+                            'share_server_id':
+                                'cd658413-d02c-4d1b-ac8a-b6b972e76bac',
+                            'share_id':
+                                'e42fec45-781e-4dcc-a4d2-44354ad5ae91',
+                            'host': 'hostA@backend1#pool0',
+                            'status': 'available',
+                        },
+                        'share_type':
+                        {
+                            'id': '360e01c1-a4f7-4782-9676-dc013f1a2f21',
+                            'name': 'dhss_false',
+                            'is_public': False,
+                            'extra_specs':
+                            {
+                                'driver_handles_share_servers': False,
+                            }
+                        },
+                        'share_id': 'e42fec45-781e-4dcc-a4d2-44354ad5ae91',
+                    },
+                ],
+            }
+
+        :return: A dictionary containing values indicating if destination
+            backend is compatible, if share can remain writable during
+            migration, if it can preserve all file metadata and if it can
+            perform migration of given share non-disruptively.
+
+            Example::
+
+                {
+                    'compatible': True,
+                    'writable': True,
+                    'nondisruptive': True,
+                    'preserve_snapshots': True,
+                    'migration_cancel': True,
+                    'migration_get_progress': False
+                }
+
+        """
+        return {
+            'compatible': False,
+            'writable': False,
+            'nondisruptive': False,
+            'preserve_snapshots': False,
+            'migration_cancel': False,
+            'migration_get_progress': False
+        }
+
+    def share_server_migration_complete(self, context, src_share_server,
+                                        dest_share_server, shares, snapshots,
+                                        new_network_info):
+        """Completes migration of a given share server to another host.
+
+        .. note::
+            Is called on the destination share server's backend to complete
+            the migration.
+
+        If the driver is implementing 2-phase migration, this method should
+        perform the disruptive tasks related to the 2nd phase of migration,
+        thus completing it. The driver should also delete all original data
+        from the source backend.
+
+        It is expected that all shares and snapshots will be available at
+        the destination share server at the end of migration complete, with
+        all updates provided in the returned model update.
+
+        :param context: The 'context.RequestContext' object for the request.
+        :param src_share_server: Reference to the original share server.
+        :param dest_share_server: Reference to the share server to be used
+            as destination.
+        :param shares: All shares in the source share server that should be
+            migrated.
+        :param snapshots: All snapshots in the source share server that
+            should be migrated.
+        :param new_network_info: Network allocation associated to the
+            destination share server.
+        :return: If the migration changes the shares export locations,
+            snapshots provider locations or snapshots export locations, this
+            method should return a dictionary containing a list of share
+            instances and snapshot instances indexed by their id's, where
+            each instance should provide a dict with the relevant
+            information that needs to be updated.
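+            Shares and snapshots that need no updates may simply be left
+            out of the returned dictionary; the share manager skips
+            instances that have no entry in it.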
+
+        Example::
+
+            {
+                'share_updates':
+                {
+                    '4363eb92-23ca-4888-9e24-502387816e2a':
+                    {
+                        'export_locations':
+                        [
+                            {
+                                'path': '1.2.3.4:/foo',
+                                'metadata': {},
+                                'is_admin_only': False
+                            },
+                            {
+                                'path': '5.6.7.8:/foo',
+                                'metadata': {},
+                                'is_admin_only': True
+                            },
+                        ],
+                        'pool_name': 'poolA',
+                    },
+                },
+                'snapshot_updates':
+                {
+                    'bc4e3b28-0832-4168-b688-67fdc3e9d408':
+                    {
+                        'provider_location': '/snapshots/foo/bar_1',
+                        'export_locations':
+                        [
+                            {
+                                'path': '1.2.3.4:/snapshots/foo/bar_1',
+                                'is_admin_only': False,
+                            },
+                            {
+                                'path': '5.6.7.8:/snapshots/foo/bar_1',
+                                'is_admin_only': True,
+                            },
+                        ],
+                    },
+                    '2e62b7ea-4e30-445f-bc05-fd523ca62941':
+                    {
+                        'provider_location': '/snapshots/foo/bar_2',
+                        'export_locations':
+                        [
+                            {
+                                'path': '1.2.3.4:/snapshots/foo/bar_2',
+                                'is_admin_only': False,
+                            },
+                            {
+                                'path': '5.6.7.8:/snapshots/foo/bar_2',
+                                'is_admin_only': True,
+                            },
+                        ],
+                    },
+                }
+            }
+
+        """
+        raise NotImplementedError()
diff --git a/manila/share/manager.py b/manila/share/manager.py
index 05b66c4c7e..8bef6bc457 100644
--- a/manila/share/manager.py
+++ b/manila/share/manager.py
@@ -107,6 +107,12 @@ share_manager_opts = [
                     'the share manager will poll the driver to perform the '
                     'next step of migration in the storage backend, for a '
                     'migrating share.'),
+    cfg.IntOpt('server_migration_driver_continue_update_interval',
+               default=900,
+               help='This value, specified in seconds, determines how often '
+                    'the share manager will poll the driver to perform the '
+                    'next step of migration in the storage backend, for a '
+                    'migrating share server.'),
     cfg.IntOpt('share_usage_size_update_interval',
                default=300,
                help='This value, specified in seconds, determines how often '
@@ -210,7 +216,7 @@ def add_hooks(f):
 class ShareManager(manager.SchedulerDependentManager):
     """Manages NAS storages."""
 
-    RPC_API_VERSION = '1.20'
+    RPC_API_VERSION = '1.21'
 
     def __init__(self, share_driver=None, service_name=None, *args, **kwargs):
         """Load the driver from args, or from flags."""
@@ -252,6 +258,7 @@ class ShareManager(manager.SchedulerDependentManager):
             CONF.migration_wait_access_rules_timeout)
 
         self.message_api = message_api.API()
+        self.share_api = api.API()
         self.hooks = []
         self._init_hook_drivers()
 
@@ -558,11 +565,10 @@ class ShareManager(manager.SchedulerDependentManager):
                     'id': parent_share_server_id,
                     'status': parent_share_server['status'],
                 }
-                error("Parent share server %(id)s has invalid status "
-                      "'%(status)s'.", error_params)
-                raise exception.InvalidShareServer(
-                    share_server_id=parent_share_server
-                )
+                msg = _("Parent share server %(id)s has invalid status "
+                        "'%(status)s'.")
+                error(msg, error_params)
+                raise exception.InvalidShareServer(reason=msg % error_params)
             parent_share_same_dest = (snapshot['share']['instance']['host']
                                       == share_instance['host'])
         share_network_subnet_id = None
@@ -654,6 +660,85 @@ class ShareManager(manager.SchedulerDependentManager):
             'share_type_id': share_type_id,
         }
 
+    def _provide_share_server_for_migration(self, context,
+                                            source_share_server,
+                                            new_share_network_id,
+                                            availability_zone_id,
+                                            destination_host,
+                                            create_on_backend=True,
+                                            server_metadata=None):
+        """Gets or creates a share_server for a migration procedure.
+
+        An active share_server can be deleted if there are no dependent
+        shares on it. So we need to avoid the possibility of the
+        share_server being deleted in the time gap between the share_server
+        reaching the active state and the share_server_id being set on the
+        share. This can happen, for example, with the first share creation,
+        which triggers the share_server creation.
+        For this purpose, a shared lock is used between this method and the
+        one that deletes the share_server.
+
+        :param context: Current context
+        :param source_share_server: Share server model that will be migrated.
+        :param new_share_network_id: Share network where an existing share
+            server should be found or created.
+        :param availability_zone_id: Id of the availability zone where the
+            new share server will be placed.
+        :param destination_host: The destination host where the new share
+            server will be created or retrieved.
+        :param create_on_backend: Boolean. If True, driver will be asked to
+            create the share server if no share server is available.
+        :param server_metadata: dict. Holds some important information that
+            can help drivers decide whether to create a new share server or
+            not.
+        :returns: Share server that has been chosen for share server
+            migration.
+        """
+
+        share_network_subnet = (
+            self.db.share_network_subnet_get_by_availability_zone_id(
+                context, new_share_network_id,
+                availability_zone_id=availability_zone_id))
+        if not share_network_subnet:
+            raise exception.ShareNetworkSubnetNotFound(
+                share_network_subnet_id=None)
+        share_network_subnet_id = share_network_subnet['id']
+
+        server_metadata = {} if not server_metadata else server_metadata
+
+        @utils.synchronized("share_manager_%s" % share_network_subnet_id,
+                            external=True)
+        def _wrapped_provide_share_server_for_migration():
+            destination_share_server = self.db.share_server_create(
+                context,
+                {
+                    'host': self.host,
+                    'share_network_subnet_id': share_network_subnet_id,
+                    'status': constants.STATUS_CREATING,
+                }
+            )
+
+            msg = ("Using share_server %(share_server)s as destination for "
+                   "migration.")
+            LOG.debug(msg, {
+                'share_server': destination_share_server['id'],
+            })
+
+            if create_on_backend:
+                # NOTE(carloss): Adding some information about the request,
+                # so backends that support share server migration and need
+                # to know whether the requested share server is the
+                # destination of a share server migration can use this
+                # metadata to take actions.
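+                # Illustrative, hypothetical driver-side usage of this
+                # metadata (not part of this patch), e.g. inside a driver's
+                # setup_server():
+                #
+                #     if metadata and metadata.get('migration_destination'):
+                #         # prepare to receive a migrated share server
+                #         # instead of allocating brand new resources
+                #         ...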
+                server_metadata['migration_destination'] = True
+                server_metadata['request_host'] = destination_host
+                destination_share_server = (
+                    self._create_share_server_in_backend(
+                        context, destination_share_server,
+                        metadata=server_metadata))
+
+            return destination_share_server
+
+        return _wrapped_provide_share_server_for_migration()
+
     def _create_share_server_in_backend(self, context, share_server,
                                         metadata):
         """Perform setup_server on backend
@@ -860,10 +945,8 @@ class ShareManager(manager.SchedulerDependentManager):
 
         share_server = self._get_share_server(context, src_share_instance)
 
-        share_api = api.API()
-
         request_spec, dest_share_instance = (
-            share_api.create_share_instance_and_get_request_spec(
+            self.share_api.create_share_instance_and_get_request_spec(
                 context, share_ref, new_az_id, None, dest_host,
                 new_share_network_id, new_share_type_id))
@@ -876,7 +959,7 @@ class ShareManager(manager.SchedulerDependentManager):
             context, dest_share_instance['id'], with_share_data=True)
 
         helper = migration.ShareMigrationHelper(
-            context, self.db, share_ref, self.access_helper)
+            context, self.db, self.access_helper)
 
         try:
             if dest_share_instance['share_network_id']:
@@ -1018,52 +1101,83 @@ class ShareManager(manager.SchedulerDependentManager):
         return True
 
     def _cast_access_rules_to_readonly(self, context, src_share_instance,
-                                       share_server):
-        self.db.share_instance_update(
-            context, src_share_instance['id'],
-            {'cast_rules_to_readonly': True})
+                                       share_server, dest_host=None):
+        self._cast_access_rules_to_readonly_for_server(
+            context, [src_share_instance], share_server, dest_host)
 
-        # Set all 'applying' or 'active' rules to 'queued_to_apply'. Since the
-        # share instance has its cast_rules_to_readonly attribute set to True,
-        # existing rules will be cast to read/only.
-        acceptable_past_states = (constants.ACCESS_STATE_APPLYING,
-                                  constants.ACCESS_STATE_ACTIVE)
-        new_state = constants.ACCESS_STATE_QUEUED_TO_APPLY
-        conditionally_change = {k: new_state for k in acceptable_past_states}
-        self.access_helper.get_and_update_share_instance_access_rules(
-            context, share_instance_id=src_share_instance['id'],
-            conditionally_change=conditionally_change)
+    def _cast_access_rules_to_readonly_for_server(
+            self, context, src_share_instances, share_server, dest_host=None):
+        for src_share_instance in src_share_instances:
+            self.db.share_instance_update(
+                context, src_share_instance['id'],
+                {'cast_rules_to_readonly': True})
 
-        self.access_helper.update_access_rules(
-            context, src_share_instance['id'],
-            share_server=share_server)
+            # Set all 'applying' or 'active' rules to 'queued_to_apply'.
+            # Since the share instance has its cast_rules_to_readonly
+            # attribute set to True, existing rules will be cast to
+            # read-only.
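+            # For illustration, given the literal values of these access
+            # rule constants, the resulting mapping is:
+            #     {'applying': 'queued_to_apply',
+            #      'active': 'queued_to_apply'}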
+            acceptable_past_states = (constants.ACCESS_STATE_APPLYING,
+                                      constants.ACCESS_STATE_ACTIVE)
+            new_state = constants.ACCESS_STATE_QUEUED_TO_APPLY
+            conditionally_change = {k: new_state
+                                    for k in acceptable_past_states}
+            self.access_helper.get_and_update_share_instance_access_rules(
+                context, share_instance_id=src_share_instance['id'],
+                conditionally_change=conditionally_change)
 
-        utils.wait_for_access_update(
-            context, self.db, src_share_instance,
-            self.migration_wait_access_rules_timeout)
+        src_share_instance_ids = [x.id for x in src_share_instances]
+        share_server_id = share_server['id'] if share_server else None
+        if dest_host:
+            rpcapi = share_rpcapi.ShareAPI()
+            rpcapi.update_access_for_instances(context,
+                                               dest_host,
+                                               src_share_instance_ids,
+                                               share_server_id)
+        else:
+            self.update_access_for_instances(context, src_share_instance_ids,
+                                             share_server_id=share_server_id)
+
+        for src_share_instance in src_share_instances:
+            utils.wait_for_access_update(
+                context, self.db, src_share_instance,
+                self.migration_wait_access_rules_timeout)
 
     def _reset_read_only_access_rules(
-            self, context, share, share_instance_id, supress_errors=True,
+            self, context, share_instance_id, supress_errors=True,
             helper=None):
-
         instance = self.db.share_instance_get(context, share_instance_id,
                                               with_share_data=True)
-        if instance['cast_rules_to_readonly']:
-            update = {'cast_rules_to_readonly': False}
+        share_server = self._get_share_server(context, instance)
+        self._reset_read_only_access_rules_for_server(
+            context, [instance], share_server, supress_errors, helper)
 
-            self.db.share_instance_update(
-                context, share_instance_id, update)
+    def _reset_read_only_access_rules_for_server(
+            self, context, share_instances, share_server,
+            supress_errors=True, helper=None, dest_host=None):
+        if helper is None:
+            helper = migration.ShareMigrationHelper(
+                context, self.db, self.access_helper)
 
-        share_server = self._get_share_server(context, instance)
+        instances_to_update = []
+        for share_instance in share_instances:
+            instance = self.db.share_instance_get(context,
+                                                  share_instance['id'],
+                                                  with_share_data=True)
+            if instance['cast_rules_to_readonly']:
+                update = {'cast_rules_to_readonly': False}
+                instances_to_update.append(share_instance)
 
-        if helper is None:
-            helper = migration.ShareMigrationHelper(
-                context, self.db, share, self.access_helper)
+                self.db.share_instance_update(
+                    context, share_instance['id'], update)
 
+        if instances_to_update:
             if supress_errors:
-                helper.cleanup_access_rules(instance, share_server)
+                helper.cleanup_access_rules(instances_to_update,
+                                            share_server,
+                                            dest_host)
             else:
-                helper.revert_access_rules(instance, share_server)
+                helper.revert_access_rules(instances_to_update,
+                                           share_server,
+                                           dest_host)
 
     @periodic_task.periodic_task(
         spacing=CONF.migration_driver_continue_update_interval)
@@ -1083,10 +1197,8 @@ class ShareManager(manager.SchedulerDependentManager):
 
             if share['task_state'] == (
                     constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
 
-                share_api = api.API()
-
                 src_share_instance_id, dest_share_instance_id = (
-                    share_api.get_migrating_instances(share))
+                    self.share_api.get_migrating_instances(share))
 
                 src_share_instance = instance
@@ -1140,7 +1252,7 @@ class ShareManager(manager.SchedulerDependentManager):
                     self._restore_migrating_snapshots_status(
                         context, src_share_instance['id'])
                     self._reset_read_only_access_rules(
-                        context, share, src_share_instance_id)
+                        context, src_share_instance_id)
                     self.db.share_instance_update(
                         context, src_share_instance_id,
                         {'status': constants.STATUS_AVAILABLE})
@@ -1270,7 +1382,7 @@ class ShareManager(manager.SchedulerDependentManager):
                 context, share_id,
                 {'task_state': constants.TASK_STATE_MIGRATION_ERROR})
             self._reset_read_only_access_rules(
-                context, share_ref, share_instance['id'])
+                context, share_instance['id'])
             self.db.share_instance_update(
                 context, share_instance['id'],
                 {'status': constants.STATUS_AVAILABLE})
@@ -1284,7 +1396,7 @@ class ShareManager(manager.SchedulerDependentManager):
         rpcapi = share_rpcapi.ShareAPI()
 
         helper = migration.ShareMigrationHelper(
-            context, self.db, share, self.access_helper)
+            context, self.db, self.access_helper)
 
         share_server = self._get_share_server(context.elevated(),
                                               src_share_instance)
@@ -1387,9 +1499,9 @@ class ShareManager(manager.SchedulerDependentManager):
             context, values)
 
         helper = migration.ShareMigrationHelper(
-            context, self.db, share_ref, self.access_helper)
+            context, self.db, self.access_helper)
 
-        helper.apply_new_access_rules(dest_share_instance)
+        helper.apply_new_access_rules(dest_share_instance, share_ref['id'])
 
         self.db.share_instance_update(
             context, dest_share_instance['id'],
@@ -1422,7 +1534,7 @@ class ShareManager(manager.SchedulerDependentManager):
 
         LOG.info("Share instance %s: deleted successfully.",
                  instance_id)
 
-        self._check_delete_share_server(context, share_instance)
+        self._check_delete_share_server(context,
+                                        share_instance=share_instance)
 
     @utils.require_driver_initialized
     def migration_complete(self, context, src_instance_id, dest_instance_id):
@@ -1502,9 +1614,7 @@ class ShareManager(manager.SchedulerDependentManager):
 
         share_type = share_types.get_share_type(context, share_type_id)
 
-        share_api = api.API()
-
-        return share_api.get_share_attributes_from_share_type(share_type)
+        return self.share_api.get_share_attributes_from_share_type(share_type)
 
     def _migration_complete_host_assisted(self, context, share_ref,
                                           src_instance_id, dest_instance_id):
@@ -1515,7 +1625,7 @@ class ShareManager(manager.SchedulerDependentManager):
             context, dest_instance_id, with_share_data=True)
 
         helper = migration.ShareMigrationHelper(
-            context, self.db, share_ref, self.access_helper)
+            context, self.db, self.access_helper)
 
         task_state = share_ref['task_state']
         if task_state in (constants.TASK_STATE_DATA_COPYING_ERROR,
@@ -1530,7 +1640,7 @@ class ShareManager(manager.SchedulerDependentManager):
             if cancelled:
                 suppress_errors = False
             self._reset_read_only_access_rules(
-                context, share_ref, src_instance_id,
+                context, src_instance_id,
                 supress_errors=suppress_errors, helper=helper)
             self.db.share_instance_update(
                 context, src_instance_id,
@@ -1557,14 +1667,14 @@ class ShareManager(manager.SchedulerDependentManager):
             {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING})
 
         try:
-            helper.apply_new_access_rules(dest_share_instance)
+            helper.apply_new_access_rules(dest_share_instance,
+                                          share_ref['id'])
         except Exception:
             msg = _("Failed to apply new access rules during migration "
                     "of share %s.") % share_ref['id']
             LOG.exception(msg)
             helper.cleanup_new_instance(dest_share_instance)
             self._reset_read_only_access_rules(
-                context, share_ref, src_instance_id, helper=helper,
+                context, src_instance_id, helper=helper,
                 supress_errors=True)
             self.db.share_instance_update(
                 context, src_instance_id,
@@ -1605,7 +1715,7 @@ class ShareManager(manager.SchedulerDependentManager):
             context, dest_share_instance)
 
         helper = migration.ShareMigrationHelper(
-            context, self.db, share_ref, self.access_helper)
+            context, self.db, self.access_helper)
 
         if share_ref['task_state'] == (
                 constants.TASK_STATE_DATA_COPYING_COMPLETED):
@@ -1632,7 +1742,7 @@ class ShareManager(manager.SchedulerDependentManager):
                 context, src_share_instance['id'])
 
             self._reset_read_only_access_rules(
-                context, share_ref, src_instance_id, supress_errors=False,
+                context, src_instance_id, supress_errors=False,
                 helper=helper)
 
             self.db.share_instance_update(
@@ -3114,21 +3224,27 @@ class ShareManager(manager.SchedulerDependentManager):
         LOG.info("Share instance %s: deleted successfully.",
                  share_instance_id)
 
-        self._check_delete_share_server(context, share_instance)
+        self._check_delete_share_server(context,
+                                        share_instance=share_instance)
 
         self._notify_about_share_usage(context, share,
                                        share_instance, "delete.end")
 
-    def _check_delete_share_server(self, context, share_instance):
+    def _check_delete_share_server(self, context, share_instance=None,
+                                   share_server=None, remote_host=False):
 
         if CONF.delete_share_server_with_last_share:
-            share_server = self._get_share_server(context, share_instance)
+            if share_instance and not share_server:
+                share_server = self._get_share_server(context, share_instance)
             if (share_server and len(share_server.share_instances) == 0
                     and share_server.is_auto_deletable is True):
                 LOG.debug("Scheduled deletion of share-server "
                           "with id '%s' automatically by "
                           "deletion of last share.", share_server['id'])
-                self.delete_share_server(context, share_server)
+                if remote_host:
+                    rpcapi = share_rpcapi.ShareAPI()
+                    rpcapi.delete_share_server(context, share_server)
+                else:
+                    self.delete_share_server(context, share_server)
 
     @periodic_task.periodic_task(spacing=600)
     @utils.require_driver_initialized
@@ -3619,15 +3735,26 @@ class ShareManager(manager.SchedulerDependentManager):
     def update_access(self, context, share_instance_id):
         """Allow/Deny access to some share."""
         share_instance = self._get_share_instance(context, share_instance_id)
-        share_server = self._get_share_server(context, share_instance)
+        share_server_id = share_instance.get('share_server_id')
 
-        LOG.debug("Received request to update access for share instance"
-                  " %s.", share_instance_id)
+        self.update_access_for_instances(context, [share_instance_id],
+                                         share_server_id=share_server_id)
 
-        self.access_helper.update_access_rules(
-            context,
-            share_instance_id,
-            share_server=share_server)
+    def update_access_for_instances(self, context, share_instance_ids,
+                                    share_server_id=None):
+        """Allow/Deny access to shares that belong to the same server."""
+        share_server = None
+        if share_server_id:
+            share_server = self.db.share_server_get(context, share_server_id)
+
+        for instance_id in share_instance_ids:
+            LOG.debug("Received request to update access for share instance"
+                      " %s.", instance_id)
+
+            self.access_helper.update_access_rules(
+                context,
+                instance_id,
+                share_server=share_server)
 
     @periodic_task.periodic_task(spacing=CONF.periodic_interval)
     @utils.require_driver_initialized
@@ -4611,3 +4738,651 @@ class ShareManager(manager.SchedulerDependentManager):
         if export_locations:
             self.db.share_export_locations_update(
                 context, share_instance['id'], export_locations)
+
+    def _validate_check_compatibility_result(
+            self, context, resource_id, share_instances,
+            snapshot_instances, driver_compatibility, dest_host,
+            nondisruptive, writable, preserve_snapshots,
+            resource_type='share'):
+        resource_exception = (
+            exception.ShareMigrationFailed
+            if resource_type == 'share'
+            else exception.ShareServerMigrationFailed)
+        if not driver_compatibility.get('compatible'):
+            msg = _("Destination host %(host)s is not compatible with "
+                    "%(resource_type)s %(resource_id)s's source backend "
+                    "for driver-assisted migration.") % {
+                'host': dest_host,
+                'resource_id': resource_id,
+                'resource_type': resource_type,
+            }
+            raise resource_exception(reason=msg)
+
+        if (not driver_compatibility.get('nondisruptive') and
+                nondisruptive):
+            msg = _("Driver cannot perform a non-disruptive migration of "
+                    "%(resource_type)s %(resource_id)s.") % {
+                'resource_type': resource_type,
+                'resource_id': resource_id
+            }
+            raise resource_exception(reason=msg)
+
+        if not driver_compatibility.get('writable') and writable:
+            msg = _("Driver cannot perform migration of %(resource_type)s "
+                    "%(resource_id)s while remaining writable.") % {
+                'resource_type': resource_type,
+                'resource_id': resource_id
+            }
+            raise resource_exception(reason=msg)
+
+        if (not driver_compatibility.get('preserve_snapshots')
+                and preserve_snapshots):
+            msg = _("Driver cannot perform migration of %(resource_type)s "
+                    "%(resource_id)s while preserving snapshots.") % {
+                'resource_type': resource_type,
+                'resource_id': resource_id
+            }
+            raise resource_exception(reason=msg)
+
+        if (not driver_compatibility.get('preserve_snapshots')
+                and snapshot_instances):
+            msg = _("Driver does not support preserving snapshots. The "
+                    "migration of the %(resource_type)s %(resource_id)s "
+                    "cannot proceed while it has snapshots.") % {
+                'resource_type': resource_type,
+                'resource_id': resource_id
+            }
+            raise resource_exception(reason=msg)
+
+    def _update_resource_status(self, context, status, task_state=None,
+                                share_instance_ids=None,
+                                snapshot_instance_ids=None):
+        fields = {'status': status}
+        if task_state:
+            fields['task_state'] = task_state
+        if share_instance_ids:
+            self.db.share_instances_status_update(
+                context, share_instance_ids, fields)
+        if snapshot_instance_ids:
+            self.db.share_snapshot_instances_status_update(
+                context, snapshot_instance_ids, fields)
+
+    def _share_server_migration_start_driver(
+            self, context, source_share_server, dest_host, writable,
+            nondisruptive, preserve_snapshots, new_share_network_id):
+
+        share_instances = self.db.share_instances_get_all_by_share_server(
+            context, source_share_server['id'], with_share_data=True)
+        share_instance_ids = [x.id for x in share_instances]
+
+        snapshot_instances = (
+            self.db.share_snapshot_instance_get_all_with_filters(
+                context, {'share_instance_ids': share_instance_ids}))
+        snapshot_instance_ids = [x.id for x in snapshot_instances]
+
+        old_share_network = self.db.share_network_get(
+            context, share_instances[0]['share_network_id'])
+        new_share_network = self.db.share_network_get(
+            context, new_share_network_id)
+
+        service_host = share_utils.extract_host(dest_host)
+        service = self.db.service_get_by_args(
+            context, service_host, 'manila-share')
+
+        # NOTE(dviroel): We'll build a list of request specs and send it to
+        # the driver so vendors have a chance to validate if the destination
+        # host meets the requirements before starting the migration.
+        shares_request_spec = (
+            self.share_api.get_share_server_migration_request_spec_dict(
+                context,
+                share_instances,
+                snapshot_instances,
+                availability_zone_id=service['availability_zone_id'],
+                share_network_id=new_share_network_id))
+
+        dest_share_server = None
+        try:
+            compatibility = (
+                self.driver.share_server_migration_check_compatibility(
+                    context, source_share_server, dest_host,
+                    old_share_network, new_share_network,
+                    shares_request_spec))
+
+            self._validate_check_compatibility_result(
+                context, source_share_server, share_instances,
+                snapshot_instances, compatibility, dest_host, nondisruptive,
+                writable, preserve_snapshots, resource_type='share server')
+
+            dest_share_server = self._provide_share_server_for_migration(
+                context, source_share_server, new_share_network_id,
+                service['availability_zone_id'], dest_host)
+
+            self.db.share_server_update(
+                context, dest_share_server['id'],
+                {'status': constants.STATUS_SERVER_MIGRATING_TO,
+                 'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS,
+                 'source_share_server_id': source_share_server['id']})
+
+            if not compatibility.get('writable'):
+                # NOTE(dviroel): Only modify access rules to read-only if
+                # the driver doesn't support 'writable'.
+                self._cast_access_rules_to_readonly_for_server(
+                    context, share_instances, source_share_server,
+                    source_share_server['host'])
+
+            LOG.debug("Initiating driver migration for share server %s.",
+                      source_share_server['id'])
+
+            self.db.share_server_update(
+                context, source_share_server['id'],
+                {'task_state': (
+                    constants.TASK_STATE_MIGRATION_DRIVER_STARTING)})
+            self.db.share_server_update(
+                context, dest_share_server['id'],
+                {'task_state': (
+                    constants.TASK_STATE_MIGRATION_DRIVER_STARTING)})
+
+            self.driver.share_server_migration_start(
+                context, source_share_server, dest_share_server,
+                share_instances, snapshot_instances)
+
+            self.db.share_server_update(
+                context, source_share_server['id'],
+                {'task_state': (
+                    constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)})
+            self.db.share_server_update(
+                context, dest_share_server['id'],
+                {'task_state': (
+                    constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)})
+
+        except Exception:
+            # Rollback status changes for affected resources
+            self._update_resource_status(
+                context, constants.STATUS_AVAILABLE,
+                share_instance_ids=share_instance_ids,
+                snapshot_instance_ids=snapshot_instance_ids)
+            # Rollback read only access rules
+            self._reset_read_only_access_rules_for_server(
+                context, share_instances, source_share_server,
+                dest_host=source_share_server['host'])
+            if dest_share_server:
+                self.db.share_server_update(
+                    context, dest_share_server['id'],
+                    {'task_state': constants.TASK_STATE_MIGRATION_ERROR,
+                     'status': constants.STATUS_ERROR})
+                self._check_delete_share_server(
+                    context, share_server=dest_share_server)
+            msg = _("Driver-assisted migration of share server %s "
+                    "failed.") % source_share_server['id']
+            LOG.exception(msg)
+            raise exception.ShareServerMigrationFailed(reason=msg)
+
+        return True
+
+    @add_hooks
+    @utils.require_driver_initialized
+    def share_server_migration_check(self, context, share_server_id,
+                                     dest_host, writable, nondisruptive,
+                                     preserve_snapshots,
+                                     new_share_network_id):
+        driver_result = {}
+        result = {
+            'compatible': False,
+            'writable': None,
+            'preserve_snapshots': None,
+            'nondisruptive': None,
+            'share_network_id': new_share_network_id,
+            'migration_cancel': None,
+            'migration_get_progress': None
+        }
+
+        if not self.driver.driver_handles_share_servers:
+            LOG.error('This operation is supported only on backends that '
+                      'handle share servers.')
+            return result
+
+        share_server = self.db.share_server_get(context, share_server_id)
+        share_instances = self.db.share_instances_get_all_by_share_server(
+            context, share_server_id, with_share_data=True)
+        share_instance_ids = [x.id for x in share_instances]
+
+        snapshot_instances = (
+            self.db.share_snapshot_instance_get_all_with_filters(
+                context, {'share_instance_ids': share_instance_ids}))
+
+        old_share_network = self.db.share_network_get(
+            context, share_instances[0]['share_network_id'])
+        new_share_network = self.db.share_network_get(
+            context, new_share_network_id)
+
+        service_host = share_utils.extract_host(dest_host)
+        service = self.db.service_get_by_args(
+            context, service_host, 'manila-share')
+
+        # NOTE(dviroel): We'll build a list of request specs and send it to
+        # the driver so vendors have a chance to validate if the destination
+        # host meets the requirements before starting the migration.
+        shares_request_spec = (
+            self.share_api.get_share_server_migration_request_spec_dict(
+                context,
+                share_instances,
+                snapshot_instances,
+                availability_zone_id=service['availability_zone_id'],
+                share_network_id=new_share_network_id))
+
+        try:
+            driver_result = (
+                self.driver.share_server_migration_check_compatibility(
+                    context, share_server, dest_host, old_share_network,
+                    new_share_network, shares_request_spec))
+
+            self._validate_check_compatibility_result(
+                context, share_server, share_instances,
+                snapshot_instances, driver_result, dest_host, nondisruptive,
+                writable, preserve_snapshots, resource_type='share server')
+
+        except Exception:
+            # Update driver result to not compatible since it didn't pass
+            # the validations.
+            driver_result['compatible'] = False
+
+        result.update(driver_result)
+
+        return result
+
+    @add_hooks
+    @utils.require_driver_initialized
+    def share_server_migration_start(
+            self, context, share_server_id, dest_host, writable,
+            nondisruptive, preserve_snapshots, new_share_network_id=None):
+        """Migrates a share server from current host to another host."""
+        LOG.debug("Entered share_server_migration_start method for share "
+                  "server %s.", share_server_id)
+
+        self.db.share_server_update(
+            context, share_server_id,
+            {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS})
+
+        share_server = self.db.share_server_get(context, share_server_id)
+
+        try:
+            if not self.driver.driver_handles_share_servers:
+                msg = _('This operation is supported only on backends that '
+                        'handle share servers.')
+                LOG.error(msg)
+                raise exception.ShareServerMigrationFailed(reason=msg)
+
+            self._share_server_migration_start_driver(
+                context, share_server, dest_host, writable, nondisruptive,
+                preserve_snapshots, new_share_network_id)
+        except Exception:
+            LOG.exception(
+                ("The driver could not migrate the share server "
+                 "%(server)s"), {'server': share_server_id})
+            self.db.share_server_update(
+                context, share_server_id,
+                {'task_state': constants.TASK_STATE_MIGRATION_ERROR,
+                 'status': constants.STATUS_ACTIVE})
+
+    @periodic_task.periodic_task(
+        spacing=CONF.server_migration_driver_continue_update_interval)
+    @add_hooks
+    @utils.require_driver_initialized
+    def share_server_migration_driver_continue(self, context):
+        """Invokes driver to continue migration of share server."""
+
+        # Searching for destination share servers
+        share_servers = self.db.share_server_get_all_by_host(
+            context, self.host,
+            filters={'status': constants.STATUS_SERVER_MIGRATING_TO})
+
+        dest_updates_on_error = {
+            'task_state': constants.TASK_STATE_MIGRATION_ERROR,
+            'status': constants.STATUS_ERROR,
+        }
+        src_updates_on_error = {
+            'task_state': constants.TASK_STATE_MIGRATION_ERROR,
+            'status': constants.STATUS_ACTIVE,
+        }
+        updates_on_finished = {
+            'task_state': constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE
+        }
+        for dest_share_server in share_servers:
+            if dest_share_server['task_state'] == (
+                    constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
+                src_share_server_id = dest_share_server.get(
+                    'source_share_server_id')
+                if src_share_server_id is None:
+                    msg = _('Destination share server %s does not have a '
+                            'source share server id.'
+                            ) % dest_share_server['id']
+                    LOG.error(msg)
+                    self.db.share_server_update(
+                        context, dest_share_server['id'],
+                        dest_updates_on_error)
+                    continue
+                msg_args = {
+                    'src_id': src_share_server_id,
+                    'dest_id': dest_share_server['id']
+                }
+                src_share_server = self.db.share_server_get(
+                    context, src_share_server_id)
+                if not src_share_server:
+                    msg = _('Destination share server %(dest_id)s refers to '
+                            'a source share server %(src_id)s that does not '
+                            'exist.') % msg_args
+                    LOG.error(msg)
+                    self.db.share_server_update(
+                        context, dest_share_server['id'],
+                        dest_updates_on_error)
+                    continue
+                if (src_share_server['status'] !=
+                        constants.STATUS_SERVER_MIGRATING):
+                    msg = _('Destination share server %(dest_id)s refers to '
+                            'a source share server %(src_id)s that is not '
+                            'being migrated.') % msg_args
+                    LOG.error(msg)
+                    self.db.share_server_update(
+                        context, dest_share_server['id'],
+                        dest_updates_on_error)
+                    continue
+
+                share_instances = (
+                    self.db.share_instances_get_all_by_share_server(
+                        context, src_share_server_id, with_share_data=True))
+                share_instance_ids = [x.id for x in share_instances]
+
+                snapshot_instances = (
+                    self.db.share_snapshot_instance_get_all_with_filters(
+                        context,
+                        {'share_instance_ids': share_instance_ids}))
+                snapshot_instance_ids = [x.id for x in snapshot_instances]
+
+                try:
+                    finished = self.driver.share_server_migration_continue(
+                        context, src_share_server, dest_share_server,
+                        share_instances, snapshot_instances)
+
+                    if finished:
+                        self.db.share_server_update(
+                            context, src_share_server['id'],
+                            updates_on_finished)
+                        self.db.share_server_update(
+                            context, dest_share_server['id'],
+                            updates_on_finished)
+                        msg = _("Migration of share server %s completed "
+                                "its first phase successfully."
+                                ) % src_share_server['id']
+                        LOG.info(msg)
+                    else:
+                        src_share_server = self.db.share_server_get(
+                            context, src_share_server['id'])
+                        if (src_share_server['task_state'] ==
+                                constants.TASK_STATE_MIGRATION_CANCELLED):
+                            msg = _("Migration of share server %s was "
+                                    "cancelled.") % src_share_server['id']
+                            LOG.warning(msg)
+                except Exception:
+                    self._update_resource_status(
+                        context, constants.STATUS_AVAILABLE,
+                        share_instance_ids=share_instance_ids,
+                        snapshot_instance_ids=snapshot_instance_ids)
+                    self._reset_read_only_access_rules_for_server(
+                        context, share_instances, src_share_server,
+                        dest_host=dest_share_server['host'])
+                    self.db.share_server_update(
+                        context, dest_share_server['id'],
+                        dest_updates_on_error)
+                    if src_share_server:
+                        self.db.share_server_update(
+                            context, src_share_server['id'],
+                            src_updates_on_error)
+
+                    msg = _("Migration of share server %s has failed.")
+                    LOG.exception(msg, src_share_server['id'])
+
+    @add_hooks
+    @utils.require_driver_initialized
+    def share_server_migration_complete(self, context, src_share_server_id,
+                                        dest_share_server_id):
+        """Invokes driver to complete the migration of share server."""
+        dest_server = self.db.share_server_get(context, dest_share_server_id)
+        src_server = self.db.share_server_get(context, src_share_server_id)
+
+        share_instances = (
+            self.db.share_instances_get_all_by_share_server(
+                context, src_share_server_id, with_share_data=True))
+        share_instance_ids = [x.id for x in share_instances]
+
+        snapshot_instances = (
+            self.db.share_snapshot_instance_get_all_with_filters(
+                context,
+                {'share_instance_ids': share_instance_ids}))
+        snapshot_instance_ids = [x.id for x in snapshot_instances]
+
+        updates_on_error = {
+            'task_state': constants.TASK_STATE_MIGRATION_ERROR,
+            'status': constants.STATUS_ERROR,
+        }
+        try:
+            self._server_migration_complete_driver(context,
+                                                   src_server,
+                                                   share_instances,
+                                                   snapshot_instances,
+                                                   dest_server)
+        except Exception:
+            msg = _("Driver migration completion failed for"
+                    " share server %s.") % src_share_server_id
+            LOG.exception(msg)
+            self._update_resource_status(
+                context, constants.STATUS_ERROR,
+                share_instance_ids=share_instance_ids,
+                snapshot_instance_ids=snapshot_instance_ids)
+            self.db.share_server_update(
+                context, src_share_server_id, updates_on_error)
+            self.db.share_server_update(
+                context, dest_share_server_id, updates_on_error)
+            msg_args = {
+                'source_id': src_share_server_id,
+                'dest_id': dest_share_server_id
+            }
+            msg = _('Share server migration from %(source_id)s to '
+                    '%(dest_id)s has failed in the migration-complete '
+                    'phase.') % msg_args
+            raise exception.ShareServerMigrationFailed(reason=msg)
+
+        # Update share server status for success scenario
+        self.db.share_server_update(
+            context, dest_share_server_id,
+            {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS,
+             'status': constants.STATUS_ACTIVE})
+        self._update_resource_status(
+            context, constants.STATUS_AVAILABLE,
+            share_instance_ids=share_instance_ids,
+            snapshot_instance_ids=snapshot_instance_ids)
+
+        LOG.info("Migration of share server %s completed successfully.",
+                 src_share_server_id)
+
+    def _server_migration_complete_driver(self, context, source_share_server,
+                                          share_instances,
+                                          snapshot_instances,
+                                          dest_share_server):
+
+        self.db.share_server_update(
+            context, source_share_server['id'],
+            {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING})
+        self.db.share_server_update(
+            context, dest_share_server['id'],
+            {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING})
+
+        # Retrieve network allocations reserved for the new share server
+        dest_sns = dest_share_server['share_network_subnet']
+        dest_sns_id = dest_sns['id']
+        dest_sn_id = dest_sns['share_network_id']
+        dest_sn = self.db.share_network_get(context, dest_sn_id)
+        dest_sns = self.db.share_network_subnet_get(context, dest_sns_id)
+
+        new_network_allocations = self._form_server_setup_info(
+            context, dest_share_server, dest_sn, dest_sns)
+
+        model_update = self.driver.share_server_migration_complete(
+            context, source_share_server, dest_share_server, share_instances,
+            snapshot_instances, new_network_allocations)
+
+        host_value = share_utils.extract_host(dest_share_server['host'])
+        service = self.db.service_get_by_args(
+            context, host_value, 'manila-share')
+        new_az_id = service['availability_zone_id']
+
+        share_updates = model_update.get('share_updates', {})
+        for share_instance in share_instances:
+            share_update = share_updates.get(share_instance['id'], {})
+            new_share_host = share_utils.append_host(
+                dest_share_server['host'], share_update.get('pool_name'))
+            # Update share instance with new values
+            instance_update = {
+                'share_server_id': dest_share_server['id'],
+                'host': new_share_host,
+                'share_network_id': dest_sn_id,
+                'availability_zone_id': new_az_id,
+            }
+            self.db.share_instance_update(
+                context, share_instance['id'], instance_update)
+            # Try to update info returned in the model update
+            if not share_update:
+                continue
+            # Update export locations
+            update_export_location = (
+                share_updates[share_instance['id']].get('export_locations'))
+            if update_export_location:
+                self.db.share_export_locations_update(
+                    context, share_instance['id'], update_export_location)
+
+        snapshot_updates = model_update.get('snapshot_updates', {})
+        for snap_instance in snapshot_instances:
+            snapshot_update = snapshot_updates.get(snap_instance['id'], {})
+            snapshot_export_locations = snapshot_update.pop(
+                'export_locations', [])
+            if snapshot_update:
+                self.db.share_snapshot_instance_update(
+                    context, snap_instance['id'], snapshot_update)
+
+            if snapshot_export_locations:
+                export_locations_update = []
+                for exp_location in snapshot_export_locations:
+                    updated_el = {
+                        'path': exp_location['path'],
+                        'is_admin_only': exp_location['is_admin_only'],
+                    }
+                    export_locations_update.append(updated_el)
+                self.db.share_snapshot_instance_export_locations_update(
+                    context, snap_instance['id'], export_locations_update)
+
+        # Reset read only access since migration has finished
+        self._reset_read_only_access_rules_for_server(
+            context, share_instances, source_share_server,
+            dest_host=source_share_server['host'])
+
+        # NOTE(dviroel): Setting the source share server to INACTIVE to
+        # avoid it being reused for new shares, since it may have some
+        # invalid configurations and most of the drivers don't check for
+        # compatible share servers on share creation.
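+        # Note: if 'delete_share_server_with_last_share' is enabled and the
+        # source share server is auto-deletable, the
+        # _check_delete_share_server() call below will also schedule its
+        # deletion on the source host.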
+ self.db.share_server_update( + context, source_share_server['id'], + {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS, + 'status': constants.STATUS_INACTIVE}) + + self._check_delete_share_server( + context, share_server=source_share_server, remote_host=True) + + @add_hooks + @utils.require_driver_initialized + def share_server_migration_cancel(self, context, src_share_server_id, + dest_share_server_id): + share_server = self.db.share_server_get(context, src_share_server_id) + dest_share_server = self.db.share_server_get(context, + dest_share_server_id) + + if share_server['task_state'] not in ( + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, + constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): + msg = _("Migration of share server %s cannot be cancelled at this " + "moment.") % src_share_server_id + raise exception.InvalidShareServer(reason=msg) + + share_instances = ( + self.db.share_instances_get_all_by_share_server( + context, src_share_server_id, with_share_data=True)) + share_instance_ids = [x.id for x in share_instances] + + snapshot_instances = ( + self.db.share_snapshot_instance_get_all_with_filters( + context, + {'share_instance_ids': share_instance_ids})) + snapshot_instance_ids = [x.id for x in snapshot_instances] + + # Avoid new migration continue and cancel calls while cancelling the + # migration, which can take some time to finish. The cancel in progress + # state will help administrator to identify if the operation is still + # in progress. + self.db.share_server_update( + context, share_server['id'], + {'task_state': constants.TASK_STATE_MIGRATION_CANCEL_IN_PROGRESS}) + + self.driver.share_server_migration_cancel( + context, share_server, dest_share_server, + share_instances, snapshot_instances) + + # NOTE(dviroel): After cancelling the migration we should set the new + # share server to INVALID since it may contain an invalid configuration + # to be reused. We also cleanup the source_share_server_id to unblock + # new migrations. 
+ self.db.share_server_update( + context, dest_share_server_id, + {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED, + 'status': constants.STATUS_INACTIVE}) + + self._check_delete_share_server(context, + share_server=dest_share_server) + + self._update_resource_status( + context, constants.STATUS_AVAILABLE, + share_instance_ids=share_instance_ids, + snapshot_instance_ids=snapshot_instance_ids) + + self._reset_read_only_access_rules_for_server( + context, share_instances, share_server, + dest_host=share_server['host']) + + self.db.share_server_update( + context, share_server['id'], + {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED, + 'status': constants.STATUS_ACTIVE}) + + LOG.info("Share Server Migration for share server %s was cancelled.", + share_server['id']) + + @add_hooks + @utils.require_driver_initialized + def share_server_migration_get_progress( + self, context, src_share_server_id, dest_share_server_id): + + src_share_server = self.db.share_server_get(context, + src_share_server_id) + if src_share_server['task_state'] != ( + constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): + msg = _("Driver is not performing migration for" + " share server %s at this moment.") % src_share_server_id + raise exception.InvalidShareServer(reason=msg) + + dest_share_server = self.db.share_server_get(context, + dest_share_server_id) + share_instances = ( + self.db.share_instances_get_all_by_share_server( + context, src_share_server_id, with_share_data=True)) + share_instance_ids = [x.id for x in share_instances] + + snapshot_instances = ( + self.db.share_snapshot_instance_get_all_with_filters( + context, + {'share_instance_ids': share_instance_ids})) + + return self.driver.share_server_migration_get_progress( + context, src_share_server, dest_share_server, share_instances, + snapshot_instances) diff --git a/manila/share/migration.py b/manila/share/migration.py index 224d1e5ae5..cc09fb89f1 100644 --- a/manila/share/migration.py +++ b/manila/share/migration.py @@ -23,6 +23,7 @@ from manila.common import constants from manila import exception from manila.i18n import _ from manila.share import api as share_api +from manila.share import rpcapi as share_rpcapi import manila.utils as utils @@ -47,10 +48,9 @@ CONF.register_opts(migration_opts) class ShareMigrationHelper(object): - def __init__(self, context, db, share, access_helper): + def __init__(self, context, db, access_helper): self.db = db - self.share = share self.context = context self.access_helper = access_helper self.api = share_api.API() @@ -132,36 +132,50 @@ class ShareMigrationHelper(object): try: self.delete_instance_and_wait(new_instance) except Exception: - LOG.warning("Failed to cleanup new instance during generic" - " migration for share %s.", self.share['id']) + LOG.warning("Failed to cleanup new instance during generic " + "migration for share %s.", new_instance['share_id']) - def cleanup_access_rules(self, share_instance, share_server): + def cleanup_access_rules(self, share_instances, share_server, + dest_host=None): try: - self.revert_access_rules(share_instance, share_server) + self.revert_access_rules(share_instances, share_server, dest_host) except Exception: LOG.warning("Failed to cleanup access rules during generic" - " migration for share %s.", self.share['id']) + " migration.") - def revert_access_rules(self, share_instance, share_server): + def revert_access_rules(self, share_instances, share_server, + dest_host=None): + shares_instance_ids = [] + for share_instance in share_instances: + # Cast all rules to 
'queued_to_apply' so that they can be + # re-applied. + shares_instance_ids.append(share_instance['id']) + updates = {'state': constants.ACCESS_STATE_QUEUED_TO_APPLY} + self.access_helper.get_and_update_share_instance_access_rules( + self.context, updates=updates, + share_instance_id=share_instance['id']) - # Cast all rules to 'queued_to_apply' so that they can be re-applied. - updates = {'state': constants.ACCESS_STATE_QUEUED_TO_APPLY} - self.access_helper.get_and_update_share_instance_access_rules( - self.context, updates=updates, - share_instance_id=share_instance['id']) + if dest_host: + rpcapi = share_rpcapi.ShareAPI() + rpcapi.update_access_for_instances(self.context, dest_host, + shares_instance_ids, + share_server) + else: + for share_instance in share_instances: + self.access_helper.update_access_rules( + self.context, share_instance['id'], + share_server=share_server) - self.access_helper.update_access_rules( - self.context, share_instance['id'], share_server=share_server) + for share_instance in share_instances: + utils.wait_for_access_update( + self.context, self.db, share_instance, + self.migration_wait_access_rules_timeout) - utils.wait_for_access_update( - self.context, self.db, share_instance, - self.migration_wait_access_rules_timeout) - - def apply_new_access_rules(self, new_share_instance): + def apply_new_access_rules(self, new_share_instance, share_id): rules = self.db.share_instance_access_copy( - self.context, self.share['id'], new_share_instance['id']) + self.context, share_id, new_share_instance['id']) if rules: self.api.allow_access_to_instance(self.context, new_share_instance) diff --git a/manila/share/rpcapi.py b/manila/share/rpcapi.py index 401668bc4e..7d150d1b0b 100644 --- a/manila/share/rpcapi.py +++ b/manila/share/rpcapi.py @@ -77,6 +77,8 @@ class ShareAPI(object): 1.18 - Remove unused "share_id" parameter from revert_to_snapshot() 1.19 - Add manage_share_server() and unmanage_share_server() 1.20 - Add share_instance_id parameter for create_share_server() method + 1.21 - Add share_server_migration_start, share_server_migration_check() + and share_server_get_progress() """ BASE_RPC_API_VERSION = '1.0' @@ -85,7 +87,7 @@ class ShareAPI(object): super(ShareAPI, self).__init__() target = messaging.Target(topic=CONF.share_topic, version=self.BASE_RPC_API_VERSION) - self.client = rpc.get_client(target, version_cap='1.20') + self.client = rpc.get_client(target, version_cap='1.21') def create_share_instance(self, context, share_instance, host, request_spec, filter_properties, @@ -180,6 +182,64 @@ class ShareAPI(object): new_share_network_id=new_share_network_id, new_share_type_id=new_share_type_id) + def share_server_migration_start(self, context, share_server, dest_host, + writable, nondisruptive, + preserve_snapshots, new_share_network_id): + host = utils.extract_host(dest_host) + call_context = self.client.prepare(server=host, version='1.21') + call_context.cast( + context, + 'share_server_migration_start', + share_server_id=share_server['id'], + dest_host=dest_host, + writable=writable, + nondisruptive=nondisruptive, + preserve_snapshots=preserve_snapshots, + new_share_network_id=new_share_network_id) + + def share_server_migration_check(self, context, share_server_id, dest_host, + writable, nondisruptive, + preserve_snapshots, new_share_network_id): + host = utils.extract_host(dest_host) + call_context = self.client.prepare(server=host, version='1.21') + return call_context.call( + context, + 'share_server_migration_check', + share_server_id=share_server_id, + 
dest_host=dest_host, + writable=writable, + nondisruptive=nondisruptive, + preserve_snapshots=preserve_snapshots, + new_share_network_id=new_share_network_id) + + def share_server_migration_cancel(self, context, dest_host, share_server, + dest_share_server): + host = utils.extract_host(dest_host) + call_context = self.client.prepare(server=host, version='1.21') + call_context.cast( + context, + 'share_server_migration_cancel', + src_share_server_id=share_server['id'], + dest_share_server_id=dest_share_server['id']) + + def share_server_migration_get_progress(self, context, dest_host, + share_server, dest_share_server): + host = utils.extract_host(dest_host) + call_context = self.client.prepare(server=host, version='1.21') + return call_context.call(context, + 'share_server_migration_get_progress', + src_share_server_id=share_server['id'], + dest_share_server_id=dest_share_server['id']) + + def share_server_migration_complete(self, context, dest_host, + share_server, dest_share_server): + host = utils.extract_host(dest_host) + call_context = self.client.prepare(server=host, version='1.21') + call_context.cast(context, + 'share_server_migration_complete', + src_share_server_id=share_server['id'], + dest_share_server_id=dest_share_server['id']) + def connection_get_info(self, context, share_instance): new_host = utils.extract_host(share_instance['host']) call_context = self.client.prepare(server=new_host, version='1.12') @@ -234,6 +294,14 @@ class ShareAPI(object): call_context.cast(context, 'update_access', share_instance_id=share_instance['id']) + def update_access_for_instances(self, context, dest_host, + share_instance_ids, share_server_id=None): + host = utils.extract_host(dest_host) + call_context = self.client.prepare(server=host, version='1.21') + call_context.cast(context, 'update_access_for_instances', + share_instance_ids=share_instance_ids, + share_server_id=share_server_id) + def publish_service_capabilities(self, context): call_context = self.client.prepare(fanout=True, version='1.0') call_context.cast(context, 'publish_service_capabilities') diff --git a/manila/tests/api/v1/test_share_manage.py b/manila/tests/api/v1/test_share_manage.py index 68affd208e..de7d83496a 100644 --- a/manila/tests/api/v1/test_share_manage.py +++ b/manila/tests/api/v1/test_share_manage.py @@ -174,8 +174,7 @@ class ShareManageTest(test.TestCase): body = get_fake_manage_body() self._setup_manage_mocks() error = mock.Mock( - side_effect=exception.InvalidShareServer(message="", - share_server_id="") + side_effect=exception.InvalidShareServer(reason="") ) self.mock_object(share_api.API, 'manage', mock.Mock(side_effect=error)) diff --git a/manila/tests/api/v1/test_share_servers.py b/manila/tests/api/v1/test_share_servers.py index f4234d08e4..9c2096f5b5 100644 --- a/manila/tests/api/v1/test_share_servers.py +++ b/manila/tests/api/v1/test_share_servers.py @@ -40,6 +40,8 @@ fake_share_server_list = { 'project_id': 'fake_project_id', 'id': 'fake_server_id', 'is_auto_deletable': False, + 'task_state': None, + 'source_share_server_id': None, 'identifier': 'fake_id' }, { @@ -52,6 +54,8 @@ fake_share_server_list = { 'project_id': 'fake_project_id_2', 'id': 'fake_server_id_2', 'is_auto_deletable': True, + 'task_state': None, + 'source_share_server_id': None, 'identifier': 'fake_id_2' }, ] @@ -88,6 +92,8 @@ fake_share_server_get_result = { 'fake_key_2': 'fake_value_2', }, 'is_auto_deletable': False, + 'task_state': None, + 'source_share_server_id': None, 'identifier': 'fake_id' } } @@ -122,6 +128,8 @@ class 
FakeShareServer(object): self.project_id = 'fake_project_id' self.identifier = kwargs.get('identifier', 'fake_id') self.is_auto_deletable = kwargs.get('is_auto_deletable', False) + self.task_state = kwargs.get('task_state') + self.source_share_server_id = kwargs.get('source_share_server_id') self.backend_details = share_server_backend_details def __getitem__(self, item): @@ -138,6 +146,7 @@ def fake_share_server_get_all(): 'share_network_id': 'fake_sn_id_2', }, identifier='fake_id_2', + task_state=None, is_auto_deletable=True, status=constants.STATUS_ERROR) ] diff --git a/manila/tests/api/v2/test_share_servers.py b/manila/tests/api/v2/test_share_servers.py index 8118afb329..c068656c0a 100644 --- a/manila/tests/api/v2/test_share_servers.py +++ b/manila/tests/api/v2/test_share_servers.py @@ -20,6 +20,7 @@ import webob from manila.api.v2 import share_servers from manila.common import constants +from manila import context as ctx_api from manila.db import api as db_api from manila import exception from manila import policy @@ -456,3 +457,612 @@ class ShareServerControllerTest(test.TestCase): mock_unmanage.assert_called_once_with(context, server, force=True) policy.check_policy.assert_called_once_with( context, self.resource_name, 'unmanage_share_server') + + def _get_server_migration_request(self, server_id): + req = fakes.HTTPRequest.blank( + '/share-servers/%s/action' % server_id, + use_admin_context=True, version='2.57') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.api_version_request.experimental = True + return req + + def test_share_server_migration_start(self): + server = db_utils.create_share_server(id='fake_server_id', + status=constants.STATUS_ACTIVE) + share_network = db_utils.create_share_network() + req = self._get_server_migration_request(server['id']) + context = req.environ['manila.context'] + + self.mock_object(db_api, 'share_network_get', mock.Mock( + return_value=share_network)) + self.mock_object(db_api, 'share_server_get', + mock.Mock(return_value=server)) + self.mock_object(share_api.API, 'share_server_migration_start') + + body = { + 'migration_start': { + 'host': 'fake_host', + 'preserve_snapshots': True, + 'writable': True, + 'nondisruptive': True, + 'new_share_network_id': 'fake_net_id', + } + } + + self.controller.share_server_migration_start(req, server['id'], body) + + db_api.share_server_get.assert_called_once_with( + context, server['id']) + share_api.API.share_server_migration_start.assert_called_once_with( + context, server, 'fake_host', True, True, True, + new_share_network=share_network) + db_api.share_network_get.assert_called_once_with( + context, 'fake_net_id') + + @ddt.data({'api_exception': exception.ServiceIsDown(service='fake_srv'), + 'expected_exception': webob.exc.HTTPBadRequest}, + {'api_exception': exception.InvalidShareServer(reason=""), + 'expected_exception': webob.exc.HTTPConflict}) + @ddt.unpack + def test_share_server_migration_start_conflict(self, api_exception, + expected_exception): + server = db_utils.create_share_server( + id='fake_server_id', status=constants.STATUS_ACTIVE) + req = self._get_server_migration_request(server['id']) + context = req.environ['manila.context'] + body = { + 'migration_start': { + 'host': 'fake_host', + 'preserve_snapshots': True, + 'writable': True, + 'nondisruptive': True, + } + } + self.mock_object(share_api.API, 'share_server_migration_start', + mock.Mock(side_effect=api_exception)) + self.mock_object(db_api, 'share_server_get', + mock.Mock(return_value=server)) + + 
self.assertRaises(expected_exception, + self.controller.share_server_migration_start, + req, server['id'], body) + + db_api.share_server_get.assert_called_once_with(context, + server['id']) + migration_start_params = body['migration_start'] + share_api.API.share_server_migration_start.assert_called_once_with( + context, server, migration_start_params['host'], + migration_start_params['writable'], + migration_start_params['nondisruptive'], + migration_start_params['preserve_snapshots'], + new_share_network=None) + + @ddt.data('host', 'body') + def test_share_server_migration_start_missing_mandatory(self, param): + server = db_utils.create_share_server( + id='fake_server_id', status=constants.STATUS_ACTIVE) + req = self._get_server_migration_request(server['id']) + context = req.environ['manila.context'] + + body = { + 'migration_start': { + 'host': 'fake_host', + 'preserve_metadata': True, + 'preserve_snapshots': True, + 'writable': True, + 'nondisruptive': True, + } + } + + if param == 'body': + body.pop('migration_start') + else: + body['migration_start'].pop(param) + + method = 'share_server_migration_start' + + self.mock_object(share_api.API, method) + self.mock_object(db_api, 'share_server_get', + mock.Mock(return_value=server)) + + self.assertRaises(webob.exc.HTTPBadRequest, + getattr(self.controller, method), + req, server['id'], body) + db_api.share_server_get.assert_called_once_with(context, + server['id']) + + @ddt.data('nondisruptive', 'writable', 'preserve_snapshots') + def test_share_server_migration_start_non_boolean(self, param): + server = db_utils.create_share_server( + id='fake_server_id', status=constants.STATUS_ACTIVE) + req = self._get_server_migration_request(server['id']) + context = req.environ['manila.context'] + + body = { + 'migration_start': { + 'host': 'fake_host', + 'preserve_snapshots': True, + 'writable': True, + 'nondisruptive': True, + } + } + + body['migration_start'][param] = None + + method = 'share_server_migration_start' + + self.mock_object(share_api.API, method) + self.mock_object(db_api, 'share_server_get', + mock.Mock(return_value=server)) + + self.assertRaises(webob.exc.HTTPBadRequest, + getattr(self.controller, method), + req, server['id'], body) + db_api.share_server_get.assert_called_once_with(context, + server['id']) + + def test_share_server_migration_start_share_server_not_found(self): + fake_id = 'fake_server_id' + req = self._get_server_migration_request(fake_id) + context = req.environ['manila.context'] + + body = {'migration_start': {'host': 'fake_host'}} + + self.mock_object(db_api, 'share_server_get', + mock.Mock(side_effect=exception.ShareServerNotFound( + share_server_id=fake_id))) + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.share_server_migration_start, + req, fake_id, body) + db_api.share_server_get.assert_called_once_with(context, + fake_id) + + def test_share_server_migration_start_new_share_network_not_found(self): + server = db_utils.create_share_server( + id='fake_server_id', status=constants.STATUS_ACTIVE) + req = self._get_server_migration_request(server['id']) + context = req.environ['manila.context'] + + body = { + 'migration_start': { + 'host': 'fake_host', + 'preserve_metadata': True, + 'preserve_snapshots': True, + 'writable': True, + 'nondisruptive': True, + 'new_share_network_id': 'nonexistent'}} + + self.mock_object(db_api, 'share_network_get', + mock.Mock(side_effect=exception.NotFound())) + self.mock_object(db_api, 'share_server_get', + mock.Mock(return_value=server)) + + 
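# A 'new_share_network_id' that cannot be looked up must be rejected
+        # with HTTPBadRequest before any migration is triggered.
+        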
self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.share_server_migration_start, + req, server['id'], body) + db_api.share_network_get.assert_called_once_with(context, + 'nonexistent') + db_api.share_server_get.assert_called_once_with(context, + server['id']) + + def test_share_server_migration_start_host_with_pool(self): + server = db_utils.create_share_server(id='fake_server_id', + status=constants.STATUS_ACTIVE) + req = self._get_server_migration_request(server['id']) + + body = { + 'migration_start': { + 'host': 'fake_host@fakebackend#pool', + 'preserve_snapshots': True, + 'writable': True, + 'nondisruptive': True, + 'new_share_network_id': 'fake_net_id', + } + } + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.share_server_migration_start, + req, server['id'], body) + + def test_share_server_migration_check_host_with_pool(self): + server = db_utils.create_share_server(id='fake_server_id', + status=constants.STATUS_ACTIVE) + req = self._get_server_migration_request(server['id']) + + body = { + 'migration_start': { + 'host': 'fake_host@fakebackend#pool', + 'preserve_snapshots': True, + 'writable': True, + 'nondisruptive': True, + 'new_share_network_id': 'fake_net_id', + } + } + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.share_server_migration_check, + req, server['id'], body) + + @ddt.data(constants.TASK_STATE_MIGRATION_ERROR, None) + def test_reset_task_state(self, task_state): + server = db_utils.create_share_server( + id='fake_server_id', status=constants.STATUS_ACTIVE) + req = self._get_server_migration_request(server['id']) + + update = {'task_state': task_state} + body = {'reset_task_state': update} + + self.mock_object(db_api, 'share_server_update') + + response = self.controller.share_server_reset_task_state( + req, server['id'], body) + + self.assertEqual(202, response.status_int) + + db_api.share_server_update.assert_called_once_with(utils.IsAMatcher( + ctx_api.RequestContext), server['id'], update) + + def test_reset_task_state_error_body(self): + server = db_utils.create_share_server( + id='fake_server_id', status=constants.STATUS_ACTIVE) + req = self._get_server_migration_request(server['id']) + + update = {'error': 'error'} + body = {'reset_task_state': update} + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.share_server_reset_task_state, + req, server['id'], body) + + def test_reset_task_state_error_invalid(self): + server = db_utils.create_share_server( + id='fake_server_id', status=constants.STATUS_ACTIVE) + req = self._get_server_migration_request(server['id']) + + update = {'task_state': 'error'} + body = {'reset_task_state': update} + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.share_server_reset_task_state, + req, server['id'], body) + + def test_reset_task_state_not_found(self): + server = db_utils.create_share_server( + id='fake_server_id', status=constants.STATUS_ACTIVE) + req = self._get_server_migration_request(server['id']) + + update = {'task_state': constants.TASK_STATE_MIGRATION_ERROR} + body = {'reset_task_state': update} + + self.mock_object(db_api, 'share_server_update', + mock.Mock(side_effect=exception.ShareServerNotFound( + share_server_id='fake_server_id'))) + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.share_server_reset_task_state, + req, server['id'], body) + + db_api.share_server_update.assert_called_once_with(utils.IsAMatcher( + ctx_api.RequestContext), server['id'], update) + + def test_share_server_migration_complete(self): + server = 
db_utils.create_share_server( + id='fake_server_id', status=constants.STATUS_ACTIVE) + req = self._get_server_migration_request(server['id']) + context = req.environ['manila.context'] + + body = {'migration_complete': None} + api_return = { + 'destination_share_server_id': 'fake_destination_id' + } + + self.mock_object(share_api.API, 'share_server_migration_complete', + mock.Mock(return_value=api_return)) + self.mock_object(db_api, 'share_server_get', + mock.Mock(return_value=server)) + + result = self.controller.share_server_migration_complete( + req, server['id'], body) + + self.assertEqual(api_return, result) + share_api.API.share_server_migration_complete.assert_called_once_with( + utils.IsAMatcher(ctx_api.RequestContext), server) + db_api.share_server_get.assert_called_once_with(context, + server['id']) + + def test_share_server_migration_complete_not_found(self): + fake_id = 'fake_server_id' + req = self._get_server_migration_request(fake_id) + context = req.environ['manila.context'] + + body = {'migration_complete': None} + + self.mock_object(db_api, 'share_server_get', + mock.Mock(side_effect=exception.ShareServerNotFound( + share_server_id=fake_id))) + self.mock_object(share_api.API, 'share_server_migration_complete') + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.share_server_migration_complete, + req, fake_id, body) + db_api.share_server_get.assert_called_once_with(context, + fake_id) + + @ddt.data({'api_exception': exception.ServiceIsDown(service='fake_srv'), + 'expected_exception': webob.exc.HTTPBadRequest}, + {'api_exception': exception.InvalidShareServer(reason=""), + 'expected_exception': webob.exc.HTTPBadRequest}) + @ddt.unpack + def test_share_server_migration_complete_exceptions(self, api_exception, + expected_exception): + fake_id = 'fake_server_id' + req = self._get_server_migration_request(fake_id) + context = req.environ['manila.context'] + body = {'migration_complete': None} + self.mock_object(db_api, 'share_server_get', + mock.Mock(return_value='fake_share_server')) + self.mock_object(share_api.API, 'share_server_migration_complete', + mock.Mock(side_effect=api_exception)) + + self.assertRaises(expected_exception, + self.controller.share_server_migration_complete, + req, fake_id, body) + + db_api.share_server_get.assert_called_once_with(context, + fake_id) + share_api.API.share_server_migration_complete.assert_called_once_with( + context, 'fake_share_server') + + def test_share_server_migration_cancel(self): + server = db_utils.create_share_server( + id='fake_server_id', status=constants.STATUS_ACTIVE) + req = self._get_server_migration_request(server['id']) + context = req.environ['manila.context'] + + body = {'migration_cancel': None} + + self.mock_object(db_api, 'share_server_get', + mock.Mock(return_value=server)) + self.mock_object(share_api.API, 'share_server_migration_cancel') + + self.controller.share_server_migration_cancel( + req, server['id'], body) + + share_api.API.share_server_migration_cancel.assert_called_once_with( + utils.IsAMatcher(ctx_api.RequestContext), server) + db_api.share_server_get.assert_called_once_with(context, + server['id']) + + def test_share_server_migration_cancel_not_found(self): + fake_id = 'fake_server_id' + req = self._get_server_migration_request(fake_id) + context = req.environ['manila.context'] + + body = {'migration_cancel': None} + + self.mock_object(db_api, 'share_server_get', + mock.Mock(side_effect=exception.ShareServerNotFound( + share_server_id=fake_id))) + self.mock_object(share_api.API, 
'share_server_migration_cancel') + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.share_server_migration_cancel, + req, fake_id, body) + db_api.share_server_get.assert_called_once_with(context, + fake_id) + + @ddt.data({'api_exception': exception.ServiceIsDown(service='fake_srv'), + 'expected_exception': webob.exc.HTTPBadRequest}, + {'api_exception': exception.InvalidShareServer(reason=""), + 'expected_exception': webob.exc.HTTPBadRequest}) + @ddt.unpack + def test_share_server_migration_cancel_exceptions(self, api_exception, + expected_exception): + fake_id = 'fake_server_id' + req = self._get_server_migration_request(fake_id) + context = req.environ['manila.context'] + body = {'migration_complete': None} + self.mock_object(db_api, 'share_server_get', + mock.Mock(return_value='fake_share_server')) + self.mock_object(share_api.API, 'share_server_migration_cancel', + mock.Mock(side_effect=api_exception)) + + self.assertRaises(expected_exception, + self.controller.share_server_migration_cancel, + req, fake_id, body) + + db_api.share_server_get.assert_called_once_with(context, + fake_id) + share_api.API.share_server_migration_cancel.assert_called_once_with( + context, 'fake_share_server') + + def test_share_server_migration_get_progress(self): + server = db_utils.create_share_server( + id='fake_server_id', + status=constants.STATUS_ACTIVE, + task_state=constants.TASK_STATE_MIGRATION_SUCCESS) + req = self._get_server_migration_request(server['id']) + + body = {'migration_get_progress': None} + expected = { + 'total_progress': 'fake', + 'task_state': constants.TASK_STATE_MIGRATION_SUCCESS, + 'destination_share_server_id': 'fake_destination_server_id' + } + + self.mock_object(share_api.API, 'share_server_migration_get_progress', + mock.Mock(return_value=expected)) + + response = self.controller.share_server_migration_get_progress( + req, server['id'], body) + self.assertEqual(expected, response) + (share_api.API.share_server_migration_get_progress. 
+ assert_called_once_with(utils.IsAMatcher(ctx_api.RequestContext), + server['id'])) + + @ddt.data({'api_exception': exception.ServiceIsDown(service='fake_srv'), + 'expected_exception': webob.exc.HTTPConflict}, + {'api_exception': exception.InvalidShareServer(reason=""), + 'expected_exception': webob.exc.HTTPBadRequest}) + @ddt.unpack + def test_share_server_migration_get_progress_exceptions( + self, api_exception, expected_exception): + fake_id = 'fake_server_id' + req = self._get_server_migration_request(fake_id) + context = req.environ['manila.context'] + body = {'migration_complete': None} + self.mock_object(db_api, 'share_server_get', + mock.Mock(return_value='fake_share_server')) + mock_get_progress = self.mock_object( + share_api.API, 'share_server_migration_get_progress', + mock.Mock(side_effect=api_exception)) + + self.assertRaises(expected_exception, + self.controller.share_server_migration_get_progress, + req, fake_id, body) + + mock_get_progress.assert_called_once_with(context, fake_id) + + def test_share_server_migration_check(self): + fake_id = 'fake_server_id' + fake_share_server = db_utils.create_share_server(id=fake_id) + fake_share_network = db_utils.create_share_network() + req = self._get_server_migration_request(fake_id) + context = req.environ['manila.context'] + requested_writable = False + requested_nondisruptive = False + requested_preserve_snapshots = False + fake_host = 'fakehost@fakebackend' + body = { + 'migration_check': { + 'writable': requested_writable, + 'nondisruptive': requested_nondisruptive, + 'preserve_snapshots': requested_preserve_snapshots, + 'new_share_network_id': fake_share_network['id'], + 'host': fake_host + } + } + driver_result = { + 'compatible': False, + 'writable': False, + 'nondisruptive': True, + 'preserve_snapshots': False, + 'share_network_id': 'fake_network_uuid', + 'migration_cancel': False, + 'migration_get_progress': False, + } + + mock_server_get = self.mock_object( + db_api, 'share_server_get', + mock.Mock(return_value=fake_share_server)) + mock_network_get = self.mock_object( + db_api, 'share_network_get', + mock.Mock(return_value=fake_share_network)) + mock_migration_check = self.mock_object( + share_api.API, 'share_server_migration_check', + mock.Mock(return_value=driver_result)) + + result = self.controller.share_server_migration_check( + req, fake_id, body) + + expected_result_keys = ['compatible', 'requested_capabilities', + 'supported_capabilities'] + [self.assertIn(key, result) for key in expected_result_keys] + mock_server_get.assert_called_once_with( + context, fake_share_server['id']) + mock_network_get.assert_called_once_with( + context, fake_share_network['id']) + mock_migration_check.assert_called_once_with( + context, fake_share_server, fake_host, requested_writable, + requested_nondisruptive, requested_preserve_snapshots, + new_share_network=fake_share_network) + + @ddt.data( + (webob.exc.HTTPNotFound, True, False, {'migration_check': {}}), + (webob.exc.HTTPBadRequest, False, True, + {'migration_check': {'new_share_network_id': 'fake_id'}}), + (webob.exc.HTTPBadRequest, False, False, None) + ) + @ddt.unpack + def test_share_server_migration_check_exception( + self, exception_to_raise, raise_server_get_exception, + raise_network_get_action, body): + req = self._get_server_migration_request('fake_id') + context = req.environ['manila.context'] + if body: + body['migration_check']['writable'] = False + body['migration_check']['nondisruptive'] = False + body['migration_check']['preserve_snapshots'] = False + 
body['migration_check']['host'] = 'fakehost@fakebackend' + else: + body = {} + + server_get = mock.Mock() + network_get = mock.Mock() + if raise_server_get_exception: + server_get = mock.Mock( + side_effect=exception.ShareServerNotFound( + share_server_id='fake')) + if raise_network_get_action: + network_get = mock.Mock( + side_effect=exception.ShareNetworkNotFound( + share_network_id='fake')) + + mock_server_get = self.mock_object( + db_api, 'share_server_get', server_get) + + mock_network_get = self.mock_object( + db_api, 'share_network_get', network_get) + + self.assertRaises( + exception_to_raise, + self.controller.share_server_migration_check, + req, 'fake_id', body + ) + mock_server_get.assert_called_once_with( + context, 'fake_id') + if raise_network_get_action: + mock_network_get.assert_called_once_with(context, 'fake_id') + + @ddt.data( + {'api_exception': exception.ServiceIsDown(service='fake_srv'), + 'expected_exception': webob.exc.HTTPBadRequest}, + {'api_exception': exception.InvalidShareServer(reason=""), + 'expected_exception': webob.exc.HTTPConflict}) + @ddt.unpack + def test_share_server_migration_complete_exceptions_from_api( + self, api_exception, expected_exception): + req = self._get_server_migration_request('fake_id') + context = req.environ['manila.context'] + body = { + 'migration_check': { + 'writable': False, + 'nondisruptive': False, + 'preserve_snapshots': True, + 'host': 'fakehost@fakebackend', + } + } + + self.mock_object(db_api, 'share_server_get', + mock.Mock(return_value='fake_share_server')) + + self.mock_object(share_api.API, 'share_server_migration_check', + mock.Mock(side_effect=api_exception)) + + self.assertRaises( + expected_exception, + self.controller.share_server_migration_check, + req, 'fake_id', body + ) + + db_api.share_server_get.assert_called_once_with(context, + 'fake_id') + migration_check_params = body['migration_check'] + share_api.API.share_server_migration_check.assert_called_once_with( + context, 'fake_share_server', migration_check_params['host'], + migration_check_params['writable'], + migration_check_params['nondisruptive'], + migration_check_params['preserve_snapshots'], + new_share_network=None) diff --git a/manila/tests/db/migrations/alembic/migrations_data_checks.py b/manila/tests/db/migrations/alembic/migrations_data_checks.py index 9e53f441e7..ebe79f841d 100644 --- a/manila/tests/db/migrations/alembic/migrations_data_checks.py +++ b/manila/tests/db/migrations/alembic/migrations_data_checks.py @@ -2946,3 +2946,29 @@ class ShareInstanceProgressFieldChecks(BaseMigrationChecks): for si_record in engine.execute(si_table.select()): self.test_case.assertFalse(hasattr(si_record, self.progress_field_name)) + + +@map_to_migration('5aa813ae673d') +class ShareServerTaskState(BaseMigrationChecks): + + def setup_upgrade_data(self, engine): + # Create share server + share_server_data = { + 'id': uuidutils.generate_uuid(), + 'host': 'fake_host', + 'status': 'active', + } + ss_table = utils.load_table('share_servers', engine) + engine.execute(ss_table.insert(share_server_data)) + + def check_upgrade(self, engine, data): + ss_table = utils.load_table('share_servers', engine) + for ss in engine.execute(ss_table.select()): + self.test_case.assertTrue(hasattr(ss, 'task_state')) + self.test_case.assertTrue(hasattr(ss, 'source_share_server_id')) + + def check_downgrade(self, engine): + ss_table = utils.load_table('share_servers', engine) + for ss in engine.execute(ss_table.select()): + self.test_case.assertFalse(hasattr(ss, 'task_state')) + 
self.test_case.assertFalse(hasattr(ss, 'source_share_server_id')) diff --git a/manila/tests/db/sqlalchemy/test_api.py b/manila/tests/db/sqlalchemy/test_api.py index 2ea768743b..20bae61647 100644 --- a/manila/tests/db/sqlalchemy/test_api.py +++ b/manila/tests/db/sqlalchemy/test_api.py @@ -507,6 +507,23 @@ class ShareDatabaseAPITestCase(test.TestCase): self.assertEqual('share-%s' % instance['id'], instance['name']) + def test_share_instance_get_all_by_ids(self): + fake_share = db_utils.create_share() + expected_share_instance = db_utils.create_share_instance( + share_id=fake_share['id']) + + # Populate the db with a dummy share + db_utils.create_share_instance(share_id=fake_share['id']) + + instances = db_api.share_instances_get_all( + self.ctxt, + filters={'instance_ids': [expected_share_instance['id']]}) + + self.assertEqual(1, len(instances)) + instance = instances[0] + + self.assertEqual('share-%s' % instance['id'], instance['name']) + @ddt.data('host', 'share_group_id') def test_share_get_all_sort_by_share_instance_fields(self, sort_key): shares = [db_utils.create_share(**{sort_key: n, 'size': 1}) @@ -1699,6 +1716,33 @@ class ShareSnapshotDatabaseAPITestCase(test.TestCase): self.assertEqual(self.snapshot_instance_export_locations[0][key], out[0][key]) + def test_share_snapshot_instance_export_locations_update(self): + snapshot = db_utils.create_snapshot(with_share=True) + initial_locations = ['fake1/1/', 'fake2/2', 'fake3/3'] + update_locations = ['fake4/4', 'fake2/2', 'fake3/3'] + + # add initial locations + db_api.share_snapshot_instance_export_locations_update( + self.ctxt, snapshot.instance['id'], initial_locations, False) + # update locations + db_api.share_snapshot_instance_export_locations_update( + self.ctxt, snapshot.instance['id'], update_locations, True) + + get_result = db_api.share_snapshot_instance_export_locations_get_all( + self.ctxt, snapshot.instance['id']) + result_locations = [el['path'] for el in get_result] + + self.assertEqual(sorted(result_locations), sorted(update_locations)) + + def test_share_snapshot_instance_export_locations_update_wrong_type(self): + snapshot = db_utils.create_snapshot(with_share=True) + new_export_locations = [1] + + self.assertRaises( + exception.ManilaException, + db_api.share_snapshot_instance_export_locations_update, + self.ctxt, snapshot.instance['id'], new_export_locations, False) + class ShareExportLocationsDatabaseAPITestCase(test.TestCase): @@ -3024,6 +3068,32 @@ class ShareServerDatabaseAPITestCase(test.TestCase): self.ctxt, host, updated_before) self.assertEqual(expected_len, len(unused_deletable)) + @ddt.data({'host': 'fakepool@fakehost'}, + {'status': constants.STATUS_SERVER_MIGRATING_TO}, + {'source_share_server_id': 'fake_ss_id'}) + def test_share_server_get_all_with_filters(self, filters): + db_utils.create_share_server(**filters) + db_utils.create_share_server() + filter_keys = filters.keys() + + results = db_api.share_server_get_all_with_filters(self.ctxt, filters) + + self.assertEqual(1, len(results)) + for result in results: + for key in filter_keys: + self.assertEqual(result[key], filters[key]) + + @ddt.data('fake@fake', 'host1@backend1') + def test_share_server_get_all_by_host(self, host): + db_utils.create_share_server(host='fake@fake') + db_utils.create_share_server(host='host1@backend1') + + share_servers = db_api.share_server_get_all_by_host(self.ctxt, host) + + self.assertEqual(1, len(share_servers)) + for share_server in share_servers: + self.assertEqual(host, share_server['host']) + class 
ServiceDatabaseAPITestCase(test.TestCase):


@@ -4018,3 +4088,127 @@ class ShareInstancesTestCase(test.TestCase):
             si['host'].startswith(new_host)]
         self.assertEqual(actual_updates, expected_updates)
         self.assertEqual(expected_updates, len(host_updates))
+
+    def test_share_instances_status_update(self):
+        instances = [db_utils.create_share_instance(
+            status=constants.STATUS_SERVER_MIGRATING, share_id='fake')
+            for i in range(1, 3)]
+        share_instance_ids = [instance['id'] for instance in instances]
+        values = {'status': constants.STATUS_AVAILABLE}
+
+        db_api.share_instances_status_update(
+            self.context, share_instance_ids, values)
+
+        instances = [
+            db_api.share_instance_get(self.context, instance_id)
+            for instance_id in share_instance_ids]
+
+        for instance in instances:
+            self.assertEqual(constants.STATUS_AVAILABLE, instance['status'])
+
+    def test_share_snapshot_instances_status_update(self):
+        share_instance = db_utils.create_share_instance(
+            status=constants.STATUS_AVAILABLE, share_id='fake')
+        instances = [db_utils.create_snapshot_instance(
+            'fake_snapshot_id_1', status=constants.STATUS_CREATING,
+            share_instance_id=share_instance['id'])
+            for i in range(1, 3)]
+        snapshot_instance_ids = [instance['id'] for instance in instances]
+        values = {'status': constants.STATUS_AVAILABLE}
+
+        db_api.share_snapshot_instances_status_update(
+            self.context, snapshot_instance_ids, values)
+
+        instances = [
+            db_api.share_snapshot_instance_get(self.context, instance_id)
+            for instance_id in snapshot_instance_ids]
+
+        for instance in instances:
+            self.assertEqual(constants.STATUS_AVAILABLE, instance['status'])
+
+    def test_share_and_snapshot_instances_status_update(self):
+        share_instance = db_utils.create_share_instance(
+            status=constants.STATUS_AVAILABLE, share_id='fake')
+        share_instance_ids = [share_instance['id']]
+        fake_session = db_api.get_session()
+        snap_instances = [db_utils.create_snapshot_instance(
+            'fake_snapshot_id_1', status=constants.STATUS_CREATING,
+            share_instance_id=share_instance['id'])
+            for i in range(1, 3)]
+        snapshot_instance_ids = [instance['id'] for instance in snap_instances]
+        values = {'status': constants.STATUS_AVAILABLE}
+
+        mock_update_share_instances = self.mock_object(
+            db_api, 'share_instances_status_update',
+            mock.Mock(return_value=[share_instance]))
+        mock_update_snap_instances = self.mock_object(
+            db_api, 'share_snapshot_instances_status_update',
+            mock.Mock(return_value=snap_instances))
+        mock_get_session = self.mock_object(
+            db_api, 'get_session', mock.Mock(return_value=fake_session))
+
+        updated_share_instances, updated_snap_instances = (
+            db_api.share_and_snapshot_instances_status_update(
+                self.context, values, share_instance_ids=share_instance_ids,
+                snapshot_instance_ids=snapshot_instance_ids))
+
+        mock_get_session.assert_called()
+        mock_update_share_instances.assert_called_once_with(
+            self.context, share_instance_ids, values, session=fake_session)
+        mock_update_snap_instances.assert_called_once_with(
+            self.context, snapshot_instance_ids, values, session=fake_session)
+        self.assertEqual(updated_share_instances, [share_instance])
+        self.assertEqual(updated_snap_instances, snap_instances)
+
+    @ddt.data(
+        {
+            'share_instance_status': constants.STATUS_ERROR,
+            'snap_instance_status': constants.STATUS_AVAILABLE,
+            'expected_exc': exception.InvalidShareInstance
+        },
+        {
+            'share_instance_status': constants.STATUS_AVAILABLE,
+            'snap_instance_status': constants.STATUS_ERROR,
+            'expected_exc': exception.InvalidShareSnapshotInstance
+        }
+    )
+    
@ddt.unpack + def test_share_and_snapshot_instances_status_update_invalid_status( + self, share_instance_status, snap_instance_status, expected_exc): + share_instance = db_utils.create_share_instance( + status=share_instance_status, share_id='fake') + share_snapshot_instance = db_utils.create_snapshot_instance( + 'fake_snapshot_id_1', status=snap_instance_status, + share_instance_id=share_instance['id']) + share_instance_ids = [share_instance['id']] + snap_instance_ids = [share_snapshot_instance['id']] + values = {'status': constants.STATUS_AVAILABLE} + fake_session = db_api.get_session() + + mock_get_session = self.mock_object( + db_api, 'get_session', mock.Mock(return_value=fake_session)) + mock_instances_get_all = self.mock_object( + db_api, 'share_instances_get_all', + mock.Mock(return_value=[share_instance])) + mock_snap_instances_get_all = self.mock_object( + db_api, 'share_snapshot_instance_get_all_with_filters', + mock.Mock(return_value=[share_snapshot_instance])) + + self.assertRaises(expected_exc, + db_api.share_and_snapshot_instances_status_update, + self.context, + values, + share_instance_ids=share_instance_ids, + snapshot_instance_ids=snap_instance_ids, + current_expected_status=constants.STATUS_AVAILABLE) + + mock_get_session.assert_called() + mock_instances_get_all.assert_called_once_with( + self.context, filters={'instance_ids': share_instance_ids}, + session=fake_session) + if snap_instance_status == constants.STATUS_ERROR: + mock_snap_instances_get_all.assert_called_once_with( + self.context, {'instance_ids': snap_instance_ids}, + session=fake_session) diff --git a/manila/tests/fake_share.py b/manila/tests/fake_share.py index d442df54cc..6277e05e41 100644 --- a/manila/tests/fake_share.py +++ b/manila/tests/fake_share.py @@ -42,6 +42,7 @@ def fake_share(**kwargs): 'id': 'fake_share_instance_id', 'host': 'fakehost', 'share_type_id': '1', + 'share_network_id': 'fake share network id', }, 'mount_snapshot_support': False, } diff --git a/manila/tests/share/test_api.py b/manila/tests/share/test_api.py index e86beb3538..847ac7f483 100644 --- a/manila/tests/share/test_api.py +++ b/manila/tests/share/test_api.py @@ -4413,6 +4413,1106 @@ class ShareAPITestCase(test.TestCase): self.api.share_rpcapi.migration_get_progress.assert_called_once_with( self.context, instance1, instance2['id']) + @ddt.data(True, False) + def test__migration_initial_checks(self, create_share_network): + type_data = { + 'extra_specs': { + 'availability_zones': 'fake_az1,fake_az2' + } + } + fake_server_host = 'fake@backend' + fake_share_server = db_utils.create_share_server(host=fake_server_host) + share_type = db_utils.create_share_type(**type_data) + share_type = db_api.share_type_get(self.context, share_type['id']) + fake_share = db_utils.create_share( + host='fake@backend#pool', status=constants.STATUS_AVAILABLE, + share_type_id=share_type['id']) + fake_az = { + 'id': 'fake_az_id', + 'name': 'fake_az1' + } + fake_share_network = ( + db_utils.create_share_network() if create_share_network else None) + fake_share_network_id = ( + fake_share_network['id'] + if create_share_network else fake_share['share_network_id']) + fake_subnet = db_utils.create_share_network_subnet( + availability_zone_id=fake_az['id']) + + fake_host = 'test@fake' + service = {'availability_zone_id': fake_az['id'], + 'availability_zone': {'name': fake_az['name']}} + + mock_shares_get_all = self.mock_object( + db_api, 'share_get_all_by_share_server', + mock.Mock(return_value=[fake_share])) + mock_get_type = self.mock_object( + share_types, 
'get_share_type', mock.Mock(return_value=share_type)) + mock_validate_service = self.mock_object( + utils, 'validate_service_host') + mock_service_get = self.mock_object( + db_api, 'service_get_by_args', mock.Mock(return_value=service)) + mock_az_get = self.mock_object( + db_api, 'availability_zone_get', mock.Mock(return_value=fake_az)) + mock_get_subnet = self.mock_object( + db_api, 'share_network_subnet_get_by_availability_zone_id', + mock.Mock(return_value=fake_subnet)) + + exp_shares, exp_types, exp_service, exp_share_network_id = ( + self.api._migration_initial_checks( + self.context, fake_share_server, fake_host, + fake_share_network)) + + self.assertEqual(exp_shares, [fake_share]) + self.assertEqual(exp_types, [share_type]) + self.assertEqual(exp_service, service) + self.assertEqual(exp_share_network_id, fake_share_network_id) + mock_shares_get_all.assert_has_calls([ + mock.call(self.context, fake_share_server['id']), + mock.call(self.context, fake_share_server['id'])]) + mock_get_type.assert_called_once_with(self.context, share_type['id']) + mock_validate_service.assert_called_once_with(self.context, fake_host) + mock_service_get.assert_called_once_with( + self.context, fake_host, 'manila-share') + mock_get_subnet.assert_called_once_with( + self.context, fake_share_network_id, fake_az['id']) + mock_az_get.assert_called_once_with( + self.context, service['availability_zone']['name'] + ) + + def test_share_server_migration_get_destination(self): + fake_source_server_id = 'fake_source_id' + server_data = { + 'id': 'fake', + 'source_share_server_id': fake_source_server_id, + 'status': constants.STATUS_SERVER_MIGRATING_TO, + } + server = db_utils.create_share_server(**server_data) + mock_get_all = self.mock_object( + db_api, 'share_server_get_all_with_filters', + mock.Mock(return_value=[server])) + filters = { + 'status': constants.STATUS_SERVER_MIGRATING_TO, + 'source_share_server_id': fake_source_server_id, + } + + filtered_server = self.api.share_server_migration_get_destination( + self.context, fake_source_server_id, + status=constants.STATUS_SERVER_MIGRATING_TO + ) + self.assertEqual(filtered_server['id'], server['id']) + mock_get_all.assert_called_once_with(self.context, filters=filters) + + def test_share_server_migration_get_destination_no_share_server(self): + fake_source_server_id = 'fake_source_id' + server_data = { + 'id': 'fake', + 'source_share_server_id': fake_source_server_id, + 'status': constants.STATUS_SERVER_MIGRATING_TO, + } + db_utils.create_share_server(**server_data) + mock_get_all = self.mock_object( + db_api, 'share_server_get_all_with_filters', + mock.Mock(return_value=[])) + filters = { + 'status': constants.STATUS_SERVER_MIGRATING_TO, + 'source_share_server_id': fake_source_server_id, + } + + self.assertRaises( + exception.InvalidShareServer, + self.api.share_server_migration_get_destination, + self.context, fake_source_server_id, + status=constants.STATUS_SERVER_MIGRATING_TO + ) + mock_get_all.assert_called_once_with(self.context, filters=filters) + + def test_share_server_migration_get_destination_multiple_servers(self): + fake_source_server_id = 'fake_source_id' + server_data = { + 'id': 'fake', + 'source_share_server_id': fake_source_server_id, + 'status': constants.STATUS_SERVER_MIGRATING_TO, + } + server_1 = db_utils.create_share_server(**server_data) + server_data['id'] = 'fake_id_2' + server_2 = db_utils.create_share_server(**server_data) + mock_get_all = self.mock_object( + db_api, 'share_server_get_all_with_filters', + 
mock.Mock(return_value=[server_1, server_2])) + filters = { + 'status': constants.STATUS_SERVER_MIGRATING_TO, + 'source_share_server_id': fake_source_server_id, + } + + self.assertRaises( + exception.InvalidShareServer, + self.api.share_server_migration_get_destination, + self.context, fake_source_server_id, + status=constants.STATUS_SERVER_MIGRATING_TO + ) + mock_get_all.assert_called_once_with(self.context, filters=filters) + + def test__migration_initial_checks_no_shares(self): + fake_share_server = fakes.fake_share_server_get() + fake_share_network = {} + fake_host = 'test@fake' + mock_shares_get_all = self.mock_object( + db_api, 'share_get_all_by_share_server', + mock.Mock(return_value=[])) + + self.assertRaises( + exception.InvalidShareServer, + self.api._migration_initial_checks, + self.context, fake_share_server, fake_host, fake_share_network, + ) + mock_shares_get_all.assert_called_once_with( + self.context, fake_share_server['id']) + + def test__migration_initial_checks_server_not_active(self): + fake_share_server = fakes.fake_share_server_get() + fake_share_server['status'] = 'error' + fake_share = fakes.fake_share() + fake_share_network = {} + fake_host = 'test@fake' + + mock_shares_get_all = self.mock_object( + db_api, 'share_get_all_by_share_server', + mock.Mock(return_value=[fake_share])) + + self.assertRaises( + exception.InvalidShareServer, + self.api._migration_initial_checks, + self.context, fake_share_server, fake_host, fake_share_network, + ) + mock_shares_get_all.assert_called_once_with( + self.context, fake_share_server['id']) + + def test__migration_initial_checks_share_group_related_to_server(self): + fake_share_server = db_utils.create_share_server() + fake_share = db_utils.create_share() + fake_share_group = db_utils.create_share_group() + fake_share_network = {} + fake_host = 'test@fake' + + mock_shares_get_all = self.mock_object( + db_api, 'share_get_all_by_share_server', + mock.Mock(return_value=[fake_share])) + mock_get_groups = self.mock_object( + db_api, 'share_group_get_all_by_share_server', + mock.Mock(return_value=[fake_share_group])) + + self.assertRaises( + exception.InvalidShareServer, + self.api._migration_initial_checks, + self.context, fake_share_server, fake_host, fake_share_network, + ) + mock_shares_get_all.assert_called_once_with( + self.context, fake_share_server['id']) + mock_get_groups.assert_called_once_with(self.context, + fake_share_server['id']) + + def _setup_mocks_for_initial_checks(self, fake_share, share_type, service, + fake_az, fake_subnet): + self.mock_object( + db_api, 'share_get_all_by_share_server', + mock.Mock(return_value=[fake_share])) + self.mock_object( + db_api, 'share_group_get_all_by_share_server', + mock.Mock(return_value=[])) + self.mock_object( + share_types, 'get_share_type', mock.Mock(return_value=share_type)) + self.mock_object( + utils, 'validate_service_host') + self.mock_object( + db_api, 'service_get_by_args', mock.Mock(return_value=service)) + self.mock_object( + db_api, 'availability_zone_get', mock.Mock(return_value=fake_az)) + self.mock_object( + db_api, 'share_network_subnet_get_by_availability_zone_id', + mock.Mock(return_value=fake_subnet)) + + def test__migration_initial_checks_share_not_available(self): + fake_share_server = fakes.fake_share_server_get() + fake_share_server['host'] = 'fake@backend' + type_data = { + 'extra_specs': { + 'availability_zones': 'fake_az1,fake_az2' + } + } + fake_server_host = 'fake@backend' + fake_share_server = db_utils.create_share_server(host=fake_server_host) + 
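# The share created below is deliberately left in 'error' status:
+        # _migration_initial_checks must refuse to migrate a share server
+        # holding shares that are not available.
+        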
share_type = db_utils.create_share_type(**type_data) + share_type = db_api.share_type_get(self.context, share_type['id']) + fake_share = db_utils.create_share( + host='fake@backend#pool', status=constants.STATUS_ERROR, + share_type_id=share_type['id']) + fake_az = { + 'id': 'fake_az_id', + 'name': 'fake_az1' + } + fake_share_network = None + fake_share_network_id = fake_share['share_network_id'] + fake_subnet = db_utils.create_share_network_subnet( + availability_zone_id=fake_az['id']) + fake_host = 'test@fake' + service = {'availability_zone_id': fake_az['id'], + 'availability_zone': {'name': fake_az['name']}} + self._setup_mocks_for_initial_checks(fake_share, share_type, service, + fake_az, fake_subnet) + + self.assertRaises( + exception.InvalidShareServer, + self.api._migration_initial_checks, + self.context, fake_share_server, fake_host, fake_share_network, + ) + db_api.share_get_all_by_share_server.assert_has_calls([ + mock.call(self.context, fake_share_server['id']), + mock.call(self.context, fake_share_server['id'])]) + share_types.get_share_type.assert_called_once_with( + self.context, share_type['id']) + utils.validate_service_host.assert_called_once_with( + self.context, fake_host) + db_api.service_get_by_args.assert_called_once_with( + self.context, fake_host, 'manila-share') + db_api.availability_zone_get.assert_called_once_with( + self.context, service['availability_zone']['name'] + ) + (db_api.share_network_subnet_get_by_availability_zone_id. + assert_called_once_with( + self.context, fake_share_network_id, fake_az['id'])) + db_api.share_group_get_all_by_share_server.assert_called_once_with( + self.context, fake_share_server['id']) + + def test__migration_initial_checks_share_with_replicas(self): + fake_share_server = fakes.fake_share_server_get() + fake_share_server['host'] = 'fake@backend' + type_data = { + 'extra_specs': { + 'availability_zones': 'fake_az1,fake_az2' + } + } + fake_server_host = 'fake@backend' + fake_share_server = db_utils.create_share_server(host=fake_server_host) + share_type = db_utils.create_share_type(**type_data) + share_type = db_api.share_type_get(self.context, share_type['id']) + fake_share = db_utils.create_share( + host='fake@backend#pool', status=constants.STATUS_AVAILABLE, + replication_type='dr', share_type_id=share_type['id']) + for i in range(1, 4): + db_utils.create_share_replica( + share_id=fake_share['id'], replica_state='in_sync') + fake_share = db_api.share_get(self.context, fake_share['id']) + fake_az = { + 'id': 'fake_az_id', + 'name': 'fake_az1' + } + fake_share_network = None + fake_share_network_id = fake_share['share_network_id'] + fake_subnet = db_utils.create_share_network_subnet( + availability_zone_id=fake_az['id']) + fake_host = 'test@fake' + service = {'availability_zone_id': fake_az['id'], + 'availability_zone': {'name': fake_az['name']}} + self._setup_mocks_for_initial_checks(fake_share, share_type, service, + fake_az, fake_subnet) + + self.assertRaises( + exception.InvalidShareServer, + self.api._migration_initial_checks, + self.context, fake_share_server, fake_host, fake_share_network, + ) + db_api.share_get_all_by_share_server.assert_has_calls([ + mock.call(self.context, fake_share_server['id']), + mock.call(self.context, fake_share_server['id'])]) + share_types.get_share_type.assert_called_once_with( + self.context, share_type['id']) + utils.validate_service_host.assert_called_once_with( + self.context, fake_host) + db_api.service_get_by_args.assert_called_once_with( + self.context, fake_host, 'manila-share') + 
db_api.availability_zone_get.assert_called_once_with( + self.context, service['availability_zone']['name'] + ) + (db_api.share_network_subnet_get_by_availability_zone_id. + assert_called_once_with( + self.context, fake_share_network_id, fake_az['id'])) + db_api.share_group_get_all_by_share_server.assert_called_once_with( + self.context, fake_share_server['id']) + + def test__migration_initial_checks_share_in_share_group(self): + fake_share_server = fakes.fake_share_server_get() + fake_share_server['host'] = 'fake@backend' + type_data = { + 'extra_specs': { + 'availability_zones': 'fake_az1,fake_az2' + } + } + fake_server_host = 'fake@backend' + fake_share_server = db_utils.create_share_server(host=fake_server_host) + share_type = db_utils.create_share_type(**type_data) + share_type = db_api.share_type_get(self.context, share_type['id']) + fake_share = db_utils.create_share( + host='fake@backend#pool', status=constants.STATUS_AVAILABLE, + share_type_id=share_type['id'], share_group_id='fake_group_id') + fake_az = { + 'id': 'fake_az_id', + 'name': 'fake_az1' + } + fake_share_network = None + fake_share_network_id = fake_share['share_network_id'] + fake_subnet = db_utils.create_share_network_subnet( + availability_zone_id=fake_az['id']) + fake_host = 'test@fake' + service = {'availability_zone_id': fake_az['id'], + 'availability_zone': {'name': fake_az['name']}} + self._setup_mocks_for_initial_checks(fake_share, share_type, service, + fake_az, fake_subnet) + mock_snapshots_get = self.mock_object( + db_api, 'share_snapshot_get_all_for_share', + mock.Mock(return_value=[])) + + self.assertRaises( + exception.InvalidShareServer, + self.api._migration_initial_checks, + self.context, fake_share_server, fake_host, fake_share_network, + ) + db_api.share_get_all_by_share_server.assert_has_calls([ + mock.call(self.context, fake_share_server['id']), + mock.call(self.context, fake_share_server['id'])]) + share_types.get_share_type.assert_called_once_with( + self.context, share_type['id']) + utils.validate_service_host.assert_called_once_with( + self.context, fake_host) + db_api.service_get_by_args.assert_called_once_with( + self.context, fake_host, 'manila-share') + db_api.availability_zone_get.assert_called_once_with( + self.context, service['availability_zone']['name'] + ) + (db_api.share_network_subnet_get_by_availability_zone_id. 
+ assert_called_once_with( + self.context, fake_share_network_id, fake_az['id'])) + mock_snapshots_get.assert_called_once_with( + self.context, fake_share['id']) + db_api.share_group_get_all_by_share_server.assert_called_once_with( + self.context, fake_share_server['id']) + + def test__migration_initial_checks_same_backend_and_network(self): + fake_server_host = 'fake@backend' + fake_share_network = {'id': 'fake_share_network_id'} + fake_share_server = db_utils.create_share_server(host=fake_server_host) + fake_share = db_utils.create_share( + host=fake_server_host, status=constants.STATUS_AVAILABLE, + share_group_id='fake_group_id', + share_network_id=fake_share_network['id']) + + mock_shares_get_all = self.mock_object( + db_api, 'share_get_all_by_share_server', + mock.Mock(return_value=[fake_share])) + + self.assertRaises( + exception.InvalidShareServer, + self.api._migration_initial_checks, + self.context, fake_share_server, fake_server_host, + fake_share_network, + ) + mock_shares_get_all.assert_called_once_with( + self.context, fake_share_server['id']) + + def test__migration_initial_checks_another_migration_found(self): + fake_server_host = 'fake@backend2' + fake_share_network = {'id': 'fake_share_network_id'} + fake_share_server = db_utils.create_share_server(host=fake_server_host) + fake_share = db_utils.create_share( + host='fake@backend#pool', status=constants.STATUS_AVAILABLE, + share_group_id='fake_group_id', share_network=fake_share_network) + + mock_shares_get_all = self.mock_object( + db_api, 'share_get_all_by_share_server', + mock.Mock(return_value=[fake_share])) + mock_shares_get_servers_filters = self.mock_object( + db_api, 'share_server_get_all_with_filters', + mock.Mock(return_value=['fake_share_server'])) + + self.assertRaises( + exception.InvalidShareServer, + self.api._migration_initial_checks, + self.context, fake_share_server, fake_server_host, + fake_share_network, + ) + mock_shares_get_all.assert_called_once_with( + self.context, fake_share_server['id']) + filters = {'source_share_server_id': fake_share_server['id'], + 'status': constants.STATUS_SERVER_MIGRATING_TO} + mock_shares_get_servers_filters.assert_called_once_with( + self.context, filters=filters) + + def test_share_server_migration_get_request_spec_dict(self): + share_instances = [ + db_utils.create_share_instance(share_id='fake_id') + for i in range(1, 3)] + snapshot_instances = [ + db_utils.create_snapshot_instance( + snapshot_id='fake_' + str(i), share_instance_id='fake') + for i in range(1, 3)] + shares_req_spec = [{} for instance in share_instances] + total_shares_size = sum( + [instance.get('size', 0) for instance in share_instances]) + total_snapshots_size = sum( + [instance.get('size', 0) for instance in snapshot_instances]) + expected_result = { + 'shares_size': total_shares_size, + 'snapshots_size': total_snapshots_size, + 'shares_req_spec': shares_req_spec, + } + fake_share_type = db_utils.create_share_type() + get_type_calls = [] + get_request_spec_calls = [] + for instance in share_instances: + get_type_calls.append( + mock.call(self.context, instance['share_type_id'])) + get_request_spec_calls.append( + mock.call(instance, fake_share_type)) + + mock_get_type = self.mock_object( + share_types, 'get_share_type', + mock.Mock(return_value=fake_share_type)) + mock_get_request_spec = self.mock_object( + self.api, '_get_request_spec_dict', mock.Mock(return_value={})) + + result = self.api.get_share_server_migration_request_spec_dict( + self.context, share_instances, snapshot_instances) + + 
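# The aggregated request spec carries one spec per share instance plus
+        # the summed share and snapshot sizes, so the destination driver can
+        # perform its capacity check without a scheduler round trip.
+        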
self.assertEqual(result, expected_result) + mock_get_type.assert_has_calls(get_type_calls) + mock_get_request_spec.assert_has_calls(get_request_spec_calls) + + def test__migration_initial_checks_instance_rules_error_status(self): + fake_share_server = fakes.fake_share_server_get() + fake_share_server['host'] = 'fake@backend' + type_data = { + 'extra_specs': { + 'availability_zones': 'fake_az1,fake_az2' + } + } + fake_server_host = 'fake@backend' + fake_share_server = db_utils.create_share_server(host=fake_server_host) + share_type = db_utils.create_share_type(**type_data) + share_type = db_api.share_type_get(self.context, share_type['id']) + fake_share = db_utils.create_share( + host='fake@backend#pool', status=constants.STATUS_AVAILABLE, + share_type_id=share_type['id'], share_group_id='fake_group_id') + fake_share['instance']['access_rules_status'] = constants.STATUS_ERROR + fake_az = { + 'id': 'fake_az_id', + 'name': 'fake_az1' + } + fake_share_network = None + fake_share_network_id = fake_share['share_network_id'] + fake_subnet = db_utils.create_share_network_subnet( + availability_zone_id=fake_az['id']) + fake_host = 'test@fake' + service = {'availability_zone_id': fake_az['id'], + 'availability_zone': {'name': fake_az['name']}} + self._setup_mocks_for_initial_checks(fake_share, share_type, service, + fake_az, fake_subnet) + + mock_snapshots_get = self.mock_object( + db_api, 'share_snapshot_get_all_for_share', + mock.Mock(return_value=[])) + + self.assertRaises( + exception.InvalidShareServer, + self.api._migration_initial_checks, + self.context, fake_share_server, fake_host, fake_share_network, + ) + + db_api.share_get_all_by_share_server.assert_has_calls([ + mock.call(self.context, fake_share_server['id']), + mock.call(self.context, fake_share_server['id'])]) + share_types.get_share_type.assert_called_once_with( + self.context, share_type['id']) + utils.validate_service_host.assert_called_once_with( + self.context, fake_host) + db_api.service_get_by_args.assert_called_once_with( + self.context, fake_host, 'manila-share') + db_api.availability_zone_get.assert_called_once_with( + self.context, service['availability_zone']['name'] + ) + (db_api.share_network_subnet_get_by_availability_zone_id. 
+ assert_called_once_with( + self.context, fake_share_network_id, fake_az['id'])) + mock_snapshots_get.assert_called_once_with( + self.context, fake_share['id']) + db_api.share_group_get_all_by_share_server.assert_called_once_with( + self.context, fake_share_server['id']) + + def test__migration_initial_checks_dest_az_not_match_host_az(self): + type_data = { + 'extra_specs': { + 'availability_zones': 'zone1,zone2' + } + } + fake_server_host = 'fake@backend' + fake_share_server = db_utils.create_share_server(host=fake_server_host) + share_type = db_utils.create_share_type(**type_data) + share_type = db_api.share_type_get(self.context, share_type['id']) + fake_share = db_utils.create_share( + host='fake@backend#pool', status=constants.STATUS_AVAILABLE, + share_type_id=share_type['id']) + fake_share_network = {} + fake_host = 'test@fake' + service = {'availability_zone_id': 'fake_az_id', + 'availability_zone': {'name': 'fake_az1'}} + + mock_shares_get_all = self.mock_object( + db_api, 'share_get_all_by_share_server', + mock.Mock(return_value=[fake_share])) + mock_get_type = self.mock_object( + share_types, 'get_share_type', mock.Mock(return_value=share_type)) + mock_validate_service = self.mock_object( + utils, 'validate_service_host') + mock_service_get = self.mock_object( + db_api, 'service_get_by_args', mock.Mock(return_value=service)) + + self.assertRaises( + exception.InvalidShareServer, + self.api._migration_initial_checks, + self.context, fake_share_server, fake_host, fake_share_network, + ) + mock_shares_get_all.assert_called_once_with( + self.context, fake_share_server['id']) + mock_get_type.assert_called_once_with(self.context, share_type['id']) + mock_validate_service.assert_called_once_with(self.context, fake_host) + mock_service_get.assert_called_once_with( + self.context, fake_host, 'manila-share') + + def test__migration_initial_checks_no_matching_subnet(self): + type_data = { + 'extra_specs': { + 'availability_zones': 'fake_az1,fake_az2' + } + } + fake_server_host = 'fake@backend' + fake_share_server = db_utils.create_share_server(host=fake_server_host) + share_type = db_utils.create_share_type(**type_data) + share_type = db_api.share_type_get(self.context, share_type['id']) + fake_share = db_utils.create_share( + host='fake@backend#pool', status=constants.STATUS_AVAILABLE, + share_type_id=share_type['id']) + fake_share_network = db_utils.create_share_network() + fake_az = { + 'id': 'fake_az_id', + 'name': 'fake_az1' + } + + db_utils.create_share_network_subnet( + availability_zone_id='fake', + share_network_id=fake_share_network['id']) + fake_share_network = db_api.share_network_get( + self.context, fake_share_network['id']) + fake_host = 'test@fake' + service = {'availability_zone_id': fake_az['id'], + 'availability_zone': {'name': fake_az['name']}} + + mock_shares_get_all = self.mock_object( + db_api, 'share_get_all_by_share_server', + mock.Mock(return_value=[fake_share])) + mock_get_type = self.mock_object( + share_types, 'get_share_type', mock.Mock(return_value=share_type)) + mock_validate_service = self.mock_object( + utils, 'validate_service_host') + mock_service_get = self.mock_object( + db_api, 'service_get_by_args', mock.Mock(return_value=service)) + mock_az_get = self.mock_object( + db_api, 'availability_zone_get', mock.Mock(return_value=fake_az)) + mock_get_subnet = self.mock_object( + db_api, 'share_network_subnet_get_by_availability_zone_id', + mock.Mock(return_value=None)) + + self.assertRaises( + exception.InvalidShareServer, + 
self.api._migration_initial_checks, + self.context, fake_share_server, fake_host, fake_share_network, + ) + mock_shares_get_all.assert_called_once_with( + self.context, fake_share_server['id']) + mock_get_type.assert_called_once_with(self.context, share_type['id']) + mock_validate_service.assert_called_once_with(self.context, fake_host) + mock_service_get.assert_called_once_with( + self.context, fake_host, 'manila-share') + mock_get_subnet.assert_called_once_with( + self.context, fake_share_network['id'], fake_az['id']) + mock_az_get.assert_called_once_with( + self.context, service['availability_zone']['name'] + ) + + def test_share_server_migration_check(self): + type_data = { + 'extra_specs': { + 'availability_zones': 'fake_az1,fake_az2' + } + } + fake_share_server = db_utils.create_share_server() + share_type = db_utils.create_share_type(**type_data) + share_type = db_api.share_type_get(self.context, share_type['id']) + fake_share = db_utils.create_share( + host='fake@backend#pool', status=constants.STATUS_AVAILABLE, + share_type_id=share_type['id']) + fake_shares = [fake_share] + fake_types = [share_type] + fake_share_network = db_utils.create_share_network() + fake_az = { + 'id': 'fake_az_id', + 'name': 'fake_az1' + } + writable = True + nondisruptive = True + preserve_snapshots = True + fake_share_network = db_api.share_network_get( + self.context, fake_share_network['id']) + fake_host = 'test@fake' + service = {'availability_zone_id': fake_az['id'], + 'availability_zone': {'name': fake_az['name']}} + expected_result = { + 'requested_capabilities': {}, + 'supported_capabilities': {} + } + + mock_initial_checks = self.mock_object( + self.api, '_migration_initial_checks', + mock.Mock(return_value=[fake_shares, fake_types, service, + fake_share_network['id']])) + # NOTE(carloss): Returning an "empty" dictionary should be enough for + # this test case. The unit test to check the values being returned to + # the user should be placed in the share manager, where the dict is + # populated with the real info. At this level we only forward the + # received response to the user. 
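+        # As a purely illustrative sketch (hypothetical values, not
+        # asserted by this test), a populated check result forwarded from
+        # the manager could look like:
+        #     {'compatible': True,
+        #      'requested_capabilities': {'writable': True,
+        #                                 'nondisruptive': True,
+        #                                 'preserve_snapshots': True},
+        #      'supported_capabilities': {'writable': True,
+        #                                 'nondisruptive': False,
+        #                                 'preserve_snapshots': True}}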
+ mock_migration_check = self.mock_object( + self.share_rpcapi, 'share_server_migration_check', + mock.Mock(return_value=expected_result)) + + result = self.api.share_server_migration_check( + self.context, fake_share_server, fake_host, writable, + nondisruptive, preserve_snapshots, fake_share_network + ) + + mock_initial_checks.assert_called_once_with( + self.context, fake_share_server, fake_host, fake_share_network) + mock_migration_check.assert_called_once_with( + self.context, fake_share_server['id'], fake_host, writable, + nondisruptive, preserve_snapshots, fake_share_network['id'] + ) + self.assertEqual(result, expected_result) + + def test_share_server_migration_start(self): + type_data = { + 'extra_specs': { + 'availability_zones': 'fake_az1,fake_az2' + } + } + fake_share_server = db_utils.create_share_server() + share_type = db_utils.create_share_type(**type_data) + share_type = db_api.share_type_get(self.context, share_type['id']) + fake_shares = [db_utils.create_share( + host='fake@backend#pool', status=constants.STATUS_AVAILABLE, + share_type_id=share_type['id']) for x in range(4)] + fake_snapshots = [ + db_utils.create_snapshot(share_id=fake_shares[0]['id'])] + instance_ids = [share['instance']['id'] for share in fake_shares] + snap_instance_ids = [] + for fake_share in fake_shares: + for snapshot in fake_snapshots: + snap_instance_ids.append(snapshot['instance']['id']) + fake_types = [share_type] + fake_share_network = db_utils.create_share_network() + writable = True + nondisruptive = True + preserve_snapshots = True + fake_share_network = db_api.share_network_get( + self.context, fake_share_network['id']) + fake_host = 'test@fake' + service = {'availability_zone_id': 'fake_az_id', + 'availability_zone': {'name': 'fake_az1'}} + server_expected_update = { + 'task_state': constants.TASK_STATE_MIGRATION_STARTING, + 'status': constants.STATUS_SERVER_MIGRATING + } + share_expected_update = { + 'status': constants.STATUS_SERVER_MIGRATING + } + snapshot_get_calls = [ + mock.call(self.context, share['id']) for share in fake_shares] + + mock_initial_checks = self.mock_object( + self.api, '_migration_initial_checks', + mock.Mock(return_value=[fake_shares, fake_types, service, + fake_share_network['id']])) + mock_migration_start = self.mock_object( + self.share_rpcapi, 'share_server_migration_start') + mock_server_update = self.mock_object(db_api, 'share_server_update') + mock_snapshots_get = self.mock_object( + db_api, 'share_snapshot_get_all_for_share', + mock.Mock(return_value=fake_snapshots)) + mock_update_instances = self.mock_object( + db_api, 'share_and_snapshot_instances_status_update') + + self.api.share_server_migration_start( + self.context, fake_share_server, fake_host, writable, + nondisruptive, preserve_snapshots, fake_share_network + ) + + mock_initial_checks.assert_called_once_with( + self.context, fake_share_server, fake_host, fake_share_network) + mock_migration_start.assert_called_once_with( + self.context, fake_share_server, fake_host, writable, + nondisruptive, preserve_snapshots, fake_share_network['id'] + ) + mock_server_update.assert_called_once_with( + self.context, fake_share_server['id'], server_expected_update) + mock_snapshots_get.assert_has_calls( + snapshot_get_calls) + mock_update_instances.assert_called_once_with( + self.context, share_expected_update, + current_expected_status=constants.STATUS_AVAILABLE, + share_instance_ids=instance_ids, + snapshot_instance_ids=snap_instance_ids) + + @ddt.data( + (constants.STATUS_ACTIVE, None), + 
(constants.STATUS_SERVER_MIGRATING, + constants.TASK_STATE_MIGRATION_STARTING) + ) + @ddt.unpack + def test_share_server_migration_complete_invalid_status(self, status, + task_state): + fake_host = 'fakehost@fakebackend' + fake_share_server = db_utils.create_share_server( + status=status, task_state=task_state, host=fake_host) + self.assertRaises( + exception.InvalidShareServer, + self.api.share_server_migration_complete, + self.context, fake_share_server) + + def test_share_server_migration_complete(self): + fake_service_host = 'fakehost@fakebackend' + fake_share_server = db_utils.create_share_server( + status=constants.STATUS_SERVER_MIGRATING, + task_state=constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, + host=fake_service_host) + fake_share_server_dest = db_utils.create_share_server( + status=constants.STATUS_SERVER_MIGRATING_TO, + host=fake_service_host) + fake_service = {'availability_zone_id': 'fake_az_id', + 'availability_zone': {'name': 'fake_az1'}} + + mock_get_destination = self.mock_object( + self.api, 'share_server_migration_get_destination', + mock.Mock(return_value=fake_share_server_dest)) + mock_validate_service_host = self.mock_object( + utils, 'validate_service_host', + mock.Mock(return_value=fake_service)) + mock_migration_complete = self.mock_object( + self.share_rpcapi, 'share_server_migration_complete') + + result = self.api.share_server_migration_complete( + self.context, fake_share_server) + + expected = { + 'destination_share_server_id': fake_share_server_dest['id'] + } + self.assertEqual(expected, result) + mock_get_destination.assert_called_once_with( + self.context, fake_share_server['id'], + status=constants.STATUS_SERVER_MIGRATING_TO) + mock_validate_service_host.assert_called_once_with( + self.context, fake_service_host) + mock_migration_complete.assert_called_once_with( + self.context, fake_share_server['host'], fake_share_server, + fake_share_server_dest + ) + + @ddt.data( + (constants.STATUS_ACTIVE, None), + (constants.STATUS_SERVER_MIGRATING, + constants.TASK_STATE_MIGRATION_STARTING) + ) + @ddt.unpack + def test_share_server_migration_cancel_server_not_migrating( + self, status, task_state): + fake_share_server = db_utils.create_share_server( + status=status, task_state=task_state) + + self.mock_object(self.api, '_migration_validate_error_message', + mock.Mock(return_value=None)) + + self.assertRaises( + exception.InvalidShareServer, + self.api.share_server_migration_cancel, + self.context, + fake_share_server + ) + + @ddt.data(constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, + constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) + def test_share_server_migration_cancel_service_not_up(self, task_state): + fake_service_host = 'host@backend' + fake_share_server = db_utils.create_share_server( + status=constants.STATUS_SERVER_MIGRATING, + task_state=task_state, + host=fake_service_host) + fake_share_server_dest = db_utils.create_share_server( + status=constants.STATUS_SERVER_MIGRATING_TO, + host=fake_service_host) + + mock_get_destination = self.mock_object( + self.api, 'share_server_migration_get_destination', + mock.Mock(return_value=fake_share_server_dest)) + mock_validate_service_host = self.mock_object( + utils, 'validate_service_host', + mock.Mock(side_effect=exception.ServiceIsDown( + service="fake_service"))) + + self.assertRaises( + exception.ServiceIsDown, + self.api.share_server_migration_cancel, + self.context, + fake_share_server + ) + mock_get_destination.assert_called_once_with( + self.context, fake_share_server['id'], + 
status=constants.STATUS_SERVER_MIGRATING_TO) + mock_validate_service_host.assert_called_once_with( + self.context, fake_service_host) + + @ddt.data(constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, + constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) + def test_share_server_migration_cancel(self, task_state): + fake_service_host = 'host@backend' + fake_share_server = db_utils.create_share_server( + status=constants.STATUS_SERVER_MIGRATING, + task_state=task_state, + host=fake_service_host) + fake_share_server_dest = db_utils.create_share_server( + status=constants.STATUS_SERVER_MIGRATING_TO, + host=fake_service_host) + fake_service = {'availability_zone_id': 'fake_az_id', + 'availability_zone': {'name': 'fake_az1'}} + + mock_get_destination = self.mock_object( + self.api, 'share_server_migration_get_destination', + mock.Mock(return_value=fake_share_server_dest)) + mock_validate_service_host = self.mock_object( + utils, 'validate_service_host', + mock.Mock(return_value=fake_service)) + + self.api.share_server_migration_cancel( + self.context, fake_share_server) + + mock_get_destination.assert_called_once_with( + self.context, fake_share_server['id'], + status=constants.STATUS_SERVER_MIGRATING_TO) + mock_validate_service_host.assert_called_once_with( + self.context, fake_service_host) + + def test_share_server_migration_get_progress_not_migrating(self): + fake_share_server = db_utils.create_share_server( + status=constants.STATUS_ACTIVE) + self.assertRaises( + exception.InvalidShareServer, + self.api.share_server_migration_get_progress, + self.context, fake_share_server['id'] + ) + + def test_share_server_migration_get_progress_service_not_up(self): + fake_service_host = 'host@backend' + fake_share_server = db_utils.create_share_server( + status=constants.STATUS_SERVER_MIGRATING, + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, + host=fake_service_host) + fake_share_server_dest = db_utils.create_share_server( + status=constants.STATUS_SERVER_MIGRATING_TO, + host=fake_service_host) + + mock_get_destination = self.mock_object( + self.api, 'share_server_migration_get_destination', + mock.Mock(return_value=fake_share_server_dest)) + mock_validate_service_host = self.mock_object( + utils, 'validate_service_host', + mock.Mock(side_effect=exception.ServiceIsDown( + service="fake_service"))) + + self.assertRaises( + exception.ServiceIsDown, + self.api.share_server_migration_get_progress, + self.context, fake_share_server['id'] + ) + + mock_get_destination.assert_called_once_with( + self.context, fake_share_server['id'], + status=constants.STATUS_SERVER_MIGRATING_TO) + mock_validate_service_host.assert_called_once_with( + self.context, fake_service_host) + + def test_share_server_migration_get_progress_rpcapi_exception(self): + fake_service_host = 'host@backend' + fake_share_server = db_utils.create_share_server( + status=constants.STATUS_SERVER_MIGRATING, + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, + host=fake_service_host) + fake_share_server_dest = db_utils.create_share_server( + status=constants.STATUS_SERVER_MIGRATING_TO, + host=fake_service_host) + fake_service = {'availability_zone_id': 'fake_az_id', + 'availability_zone': {'name': 'fake_az1'}} + + mock_server_get = self.mock_object( + db_api, 'share_server_get', + mock.Mock(return_value=fake_share_server)) + mock_get_destination = self.mock_object( + self.api, 'share_server_migration_get_destination', + mock.Mock(return_value=fake_share_server_dest)) + mock_validate_service_host = self.mock_object( + utils, 
'validate_service_host', + mock.Mock(return_value=fake_service)) + mock_migration_get_progress = self.mock_object( + self.share_rpcapi, 'share_server_migration_get_progress', + mock.Mock(side_effect=Exception)) + + self.assertRaises( + exception.ShareServerMigrationError, + self.api.share_server_migration_get_progress, + self.context, + fake_share_server['id'] + ) + + mock_server_get.assert_called_once_with(self.context, + fake_share_server['id']) + mock_get_destination.assert_called_once_with( + self.context, fake_share_server['id'], + status=constants.STATUS_SERVER_MIGRATING_TO) + mock_validate_service_host.assert_called_once_with( + self.context, fake_service_host) + mock_migration_get_progress.assert_called_once_with( + self.context, fake_share_server_dest['host'], fake_share_server, + fake_share_server_dest) + + @ddt.data(constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, + constants.TASK_STATE_MIGRATION_SUCCESS) + def test_share_server_migration_get_progress(self, task_state): + fake_service_host = 'host@backend' + fake_share_server = db_utils.create_share_server( + status=constants.STATUS_SERVER_MIGRATING, + task_state=task_state, host=fake_service_host) + fake_share_server_dest = db_utils.create_share_server( + status=constants.STATUS_SERVER_MIGRATING_TO, + host=fake_service_host) + fake_service = {'availability_zone_id': 'fake_az_id', + 'availability_zone': {'name': 'fake_az1'}} + + mock_server_get = self.mock_object( + db_api, 'share_server_get', + mock.Mock(return_value=fake_share_server)) + mock_get_destination = self.mock_object( + self.api, 'share_server_migration_get_destination', + mock.Mock(return_value=fake_share_server_dest)) + mock_validate_service_host = self.mock_object( + utils, 'validate_service_host', + mock.Mock(return_value=fake_service)) + mock_migration_get_progress = self.mock_object( + self.share_rpcapi, 'share_server_migration_get_progress', + mock.Mock(return_value={'total_progress': 50})) + self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) + + result = self.api.share_server_migration_get_progress( + self.context, fake_share_server['id']) + + self.assertIn('total_progress', result) + mock_server_get.assert_called_once_with(self.context, + fake_share_server['id']) + if task_state == constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS: + mock_get_destination.assert_called_once_with( + self.context, fake_share_server['id'], + status=constants.STATUS_SERVER_MIGRATING_TO) + mock_validate_service_host.assert_called_once_with( + self.context, fake_service_host) + mock_migration_get_progress.assert_called_once_with( + self.context, fake_share_server_dest['host'], + fake_share_server, fake_share_server_dest) + + @ddt.data(constants.STATUS_SERVER_MIGRATING_TO, + constants.STATUS_SERVER_MIGRATING) + def test_share_server_migration_get_progress_invalid_share_server(self, + status): + fake_service_host = 'host@backend' + fake_share_server = db_utils.create_share_server( + status=status, + task_state=None, + host=fake_service_host) + mock_server_get = self.mock_object( + db_api, 'share_server_get', + mock.Mock(return_value=fake_share_server)) + mock_get_progress_state = self.mock_object( + self.api, '_migration_get_progress_state', + mock.Mock(return_value=None)) + self.mock_object(self.api, 'share_server_migration_get_destination') + + self.assertRaises( + exception.InvalidShareServer, + self.api.share_server_migration_get_progress, + self.context, fake_share_server['id']) + + mock_server_get.assert_called_once_with(self.context, + 
fake_share_server['id']) + if status == constants.STATUS_SERVER_MIGRATING: + mock_get_progress_state.assert_called_once_with(fake_share_server) + + def test_share_server_migration_get_progress_source_not_found(self): + fake_dest_share_server = db_utils.create_share_server( + status=constants.STATUS_ACTIVE, + task_state=constants.TASK_STATE_MIGRATION_SUCCESS) + mock_server_get = self.mock_object( + db, 'share_server_get', + mock.Mock(side_effect=exception.ShareServerNotFound( + share_server_id='fake_id'))) + mock_get_destination = self.mock_object( + self.api, 'share_server_migration_get_destination', + mock.Mock(return_value=fake_dest_share_server)) + + result = self.api.share_server_migration_get_progress( + self.context, 'fake_source_server_id') + expected = { + 'total_progress': 100, + 'destination_share_server_id': fake_dest_share_server['id'], + 'task_state': constants.TASK_STATE_MIGRATION_SUCCESS, + } + + self.assertEqual(expected, result) + mock_server_get.assert_called_once_with(self.context, + 'fake_source_server_id') + mock_get_destination.assert_called_once_with( + self.context, 'fake_source_server_id', + status=constants.STATUS_ACTIVE) + + def test_share_server_migration_get_progress_has_destination_only(self): + mock_server_get = self.mock_object( + db, 'share_server_get', + mock.Mock(side_effect=exception.ShareServerNotFound( + share_server_id='fake_id'))) + mock_get_destination = self.mock_object( + self.api, 'share_server_migration_get_destination', + mock.Mock(side_effect=exception.InvalidShareServer(reason=''))) + + self.assertRaises( + exception.InvalidShareServer, + self.api.share_server_migration_get_progress, + self.context, 'fake_src_server_id') + + mock_server_get.assert_called_once_with(self.context, + 'fake_src_server_id') + mock_get_destination.assert_called_once_with( + self.context, 'fake_src_server_id', status=constants.STATUS_ACTIVE) + class OtherTenantsShareActionsTestCase(test.TestCase): def setUp(self): diff --git a/manila/tests/share/test_driver.py b/manila/tests/share/test_driver.py index 816103a73e..5158067996 100644 --- a/manila/tests/share/test_driver.py +++ b/manila/tests/share/test_driver.py @@ -581,6 +581,64 @@ class ShareDriverTestCase(test.TestCase): share_driver.migration_get_progress, None, None, None, None, None, None, None) + def test_share_server_migration_start(self): + driver.CONF.set_default('driver_handles_share_servers', True) + share_driver = driver.ShareDriver(True) + + self.assertRaises(NotImplementedError, + share_driver.share_server_migration_start, + None, None, None, None, None) + + def test_share_server_migration_continue(self): + driver.CONF.set_default('driver_handles_share_servers', True) + share_driver = driver.ShareDriver(True) + + self.assertRaises(NotImplementedError, + share_driver.share_server_migration_continue, + None, None, None, None, None) + + def test_share_server_migration_get_progress(self): + driver.CONF.set_default('driver_handles_share_servers', True) + share_driver = driver.ShareDriver(True) + + self.assertRaises(NotImplementedError, + share_driver.share_server_migration_get_progress, + None, None, None, None, None) + + def test_share_server_migration_cancel(self): + driver.CONF.set_default('driver_handles_share_servers', True) + share_driver = driver.ShareDriver(True) + + self.assertRaises(NotImplementedError, + share_driver.share_server_migration_cancel, + None, None, None, None, None) + + def test_share_server_migration_check_compatibility(self):
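+        # NOTE: the base ShareDriver does not implement share server +        # migration, so its default compatibility report is expected to +        # deny the migration with every capability flag set to False. +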
driver.CONF.set_default('driver_handles_share_servers', True) + share_driver = driver.ShareDriver(True) + expected_compatibility = { + 'compatible': False, + 'writable': False, + 'nondisruptive': False, + 'preserve_snapshots': False, + 'migration_cancel': False, + 'migration_get_progress': False + } + + driver_compatibility = ( + share_driver.share_server_migration_check_compatibility( + None, None, None, None, None, None)) + self.assertEqual(expected_compatibility, driver_compatibility) + + def test_share_server_migration_complete(self): + driver.CONF.set_default('driver_handles_share_servers', True) + share_driver = driver.ShareDriver(True) + + self.assertRaises( + NotImplementedError, + share_driver.share_server_migration_complete, + None, None, None, None, None, None) + @ddt.data(True, False) def test_connection_get_info(self, admin): diff --git a/manila/tests/share/test_manager.py b/manila/tests/share/test_manager.py index fbb1bfae13..7ce4380fff 100644 --- a/manila/tests/share/test_manager.py +++ b/manila/tests/share/test_manager.py @@ -2518,8 +2518,8 @@ class ShareManagerTestCase(test.TestCase): driver_mock.choose_share_server_compatible_with_share ) driver_method_mock.assert_called_once_with( - self.context, [fake_share_server], share.instance, snapshot=None, - share_group=None) + self.context, [fake_share_server], share.instance, + snapshot=None, share_group=None) def test_provide_share_server_for_share_invalid_arguments(self): self.assertRaises(ValueError, @@ -2636,6 +2636,83 @@ class ShareManagerTestCase(test.TestCase): assert_called_once_with( mock.ANY, share['instance']['share_type_id'])) + def _setup_provide_server_for_migration_test(self): + source_share_server = db_utils.create_share_server() + fake_share_network = db_utils.create_share_network() + fake_network_subnet = db_utils.create_share_network_subnet( + share_network_id=fake_share_network['id']) + fake_dest_host = 'fakehost@fakebackend' + fake_az = { + 'availability_zone_id': 'fake_az_id', + 'availability_zone_name': 'fake_az_name' + } + fake_data = { + 'source_share_server': source_share_server, + 'fake_share_network': fake_share_network, + 'fake_network_subnet': fake_network_subnet, + 'fake_dest_host': fake_dest_host, + 'fake_az': fake_az, + } + return fake_data + + def test__provide_share_server_for_migration_subnet_not_found(self): + fake_data = self._setup_provide_server_for_migration_test() + + mock_subnet_get = self.mock_object( + db, 'share_network_subnet_get_by_availability_zone_id', + mock.Mock(return_value=None)) + self.assertRaises( + exception.ShareNetworkSubnetNotFound, + self.share_manager._provide_share_server_for_migration, + self.context, + fake_data['source_share_server'], + fake_data['fake_share_network']['id'], + fake_data['fake_az']['availability_zone_id'], + fake_data['fake_dest_host'] + ) + mock_subnet_get.assert_called_once_with( + self.context, fake_data['fake_share_network']['id'], + availability_zone_id=fake_data['fake_az']['availability_zone_id']) + + def test__provide_share_server_for_migration(self): + fake_data = self._setup_provide_server_for_migration_test() + dest_share_server = db_utils.create_share_server() + expected_share_server_data = { + 'host': self.share_manager.host, + 'share_network_subnet_id': fake_data['fake_network_subnet']['id'], + 'status': constants.STATUS_CREATING, + } + fake_metadata = { + 'migration_destination': True, + 'request_host': fake_data['fake_dest_host'] + } + + mock_subnet_get = self.mock_object( + db, 'share_network_subnet_get_by_availability_zone_id', + 
mock.Mock(return_value=fake_data['fake_network_subnet'])) + mock_server_create = self.mock_object( + db, 'share_server_create', + mock.Mock(return_value=dest_share_server)) + mock_create_server_in_backend = self.mock_object( + self.share_manager, '_create_share_server_in_backend', + mock.Mock(return_value=dest_share_server)) + + result = self.share_manager._provide_share_server_for_migration( + self.context, + fake_data['source_share_server'], + fake_data['fake_share_network']['id'], + fake_data['fake_az']['availability_zone_id'], + fake_data['fake_dest_host'] + ) + self.assertEqual(result, dest_share_server) + mock_subnet_get.assert_called_once_with( + self.context, fake_data['fake_share_network']['id'], + availability_zone_id=fake_data['fake_az']['availability_zone_id']) + mock_server_create.assert_called_once_with( + self.context, expected_share_server_data) + mock_create_server_in_backend.assert_called_once_with( + self.context, dest_share_server, metadata=fake_metadata) + def test_manage_share_invalid_size(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False @@ -4787,7 +4864,7 @@ class ShareManagerTestCase(test.TestCase): self.share_manager.db.service_get_by_args.assert_called_once_with( self.context, 'fake@backend', 'manila-share') (self.share_manager._reset_read_only_access_rules. - assert_called_once_with(self.context, share, instance['id'])) + assert_called_once_with(self.context, instance['id'])) def test_migration_start_exception(self): @@ -4838,7 +4915,7 @@ class ShareManagerTestCase(test.TestCase): ] (self.share_manager._reset_read_only_access_rules. - assert_called_once_with(self.context, share, instance['id'])) + assert_called_once_with(self.context, instance['id'])) self.share_manager.db.share_update.assert_has_calls(share_update_calls) self.share_manager.db.share_instance_update.assert_called_once_with( self.context, instance['id'], @@ -4851,15 +4928,15 @@ class ShareManagerTestCase(test.TestCase): @ddt.data(None, Exception('fake')) def test__migration_start_host_assisted(self, exc): + share_server = db_utils.create_share_server() instance = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_AVAILABLE, - share_server_id='fake_server_id') + share_server_id=share_server['id']) new_instance = db_utils.create_share_instance( share_id='new_fake_id', status=constants.STATUS_AVAILABLE) share = db_utils.create_share(id='fake_id', instances=[instance]) - server = 'share_server' src_connection_info = 'src_fake_info' dest_connection_info = 'dest_fake_info' instance_updates = [ @@ -4873,9 +4950,9 @@ class ShareManagerTestCase(test.TestCase): mock.Mock(return_value=helper)) self.mock_object(helper, 'cleanup_new_instance') self.mock_object(self.share_manager.db, 'share_server_get', - mock.Mock(return_value=server)) + mock.Mock(return_value=share_server)) self.mock_object(self.share_manager.db, 'share_instance_update', - mock.Mock(return_value=server)) + mock.Mock(return_value=share_server)) self.mock_object(self.share_manager.access_helper, 'get_and_update_share_instance_access_rules') self.mock_object(self.share_manager.access_helper, @@ -4908,12 +4985,15 @@ class ShareManagerTestCase(test.TestCase): 'fake_az_id', 'fake_type_id') # asserts - self.share_manager.db.share_server_get.assert_called_once_with( - utils.IsAMatcher(context.RequestContext), - instance['share_server_id']) + self.share_manager.db.share_server_get.assert_has_calls([ + mock.call(utils.IsAMatcher(context.RequestContext), + 
instance['share_server_id']), + mock.call(utils.IsAMatcher(context.RequestContext), + instance['share_server_id']) + ]) (self.share_manager.access_helper.update_access_rules. assert_called_once_with( - self.context, instance['id'], share_server=server)) + self.context, instance['id'], share_server=share_server)) helper.create_instance_and_wait.assert_called_once_with( share, 'fake_host', 'fake_net_id', 'fake_az_id', 'fake_type_id') utils.wait_for_access_update.assert_called_once_with( @@ -4922,7 +5002,7 @@ class ShareManagerTestCase(test.TestCase): if exc is None: (self.share_manager.driver.connection_get_info. - assert_called_once_with(self.context, instance, server)) + assert_called_once_with(self.context, instance, share_server)) rpcapi.ShareAPI.connection_get_info.assert_called_once_with( self.context, new_instance) data_rpc.DataAPI.migration_start.assert_called_once_with( @@ -5009,8 +5089,7 @@ class ShareManagerTestCase(test.TestCase): mock.Mock(return_value=[migrating_snap_instance])) self.mock_object(self.share_manager.driver, 'migration_start') self.mock_object(self.share_manager, '_migration_delete_instance') - self.mock_object(self.share_manager.access_helper, - 'update_access_rules') + self.mock_object(self.share_manager, 'update_access_for_instances') self.mock_object(utils, 'wait_for_access_update') # run @@ -5045,9 +5124,9 @@ class ShareManagerTestCase(test.TestCase): {'status': constants.STATUS_MIGRATING_TO}), mock.call(self.context, src_instance['id'], {'cast_rules_to_readonly': True})])) - (self.share_manager.access_helper.update_access_rules. - assert_called_once_with( - self.context, src_instance['id'], share_server=src_server)) + (self.share_manager.update_access_for_instances. + assert_called_once_with(self.context, [src_instance['id']], + share_server_id=src_server['id'])) self.share_manager.driver.migration_start.assert_called_once_with( self.context, src_instance, migrating_instance, [snapshot.instance] if has_snapshots else [], @@ -5283,7 +5362,7 @@ class ShareManagerTestCase(test.TestCase): (self.share_manager._migration_delete_instance. assert_called_once_with(self.context, dest_instance['id'])) (self.share_manager._reset_read_only_access_rules. - assert_called_once_with(self.context, share, src_instance['id'])) + assert_called_once_with(self.context, src_instance['id'])) (self.share_manager.db.share_snapshot_instance_update. assert_called_once_with( self.context, migrating_snap_instance['id'], @@ -5504,7 +5583,7 @@ class ShareManagerTestCase(test.TestCase): if status != 'other': helper.cleanup_new_instance.assert_called_once_with(new_instance) (self.share_manager._reset_read_only_access_rules. - assert_called_once_with(self.context, share, instance['id'], + assert_called_once_with(self.context, instance['id'], helper=helper, supress_errors=cancelled)) if status == constants.TASK_STATE_MIGRATION_CANCELLED: (self.share_manager.db.share_instance_update. @@ -5517,7 +5596,7 @@ class ShareManagerTestCase(test.TestCase): {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED}) if status == constants.TASK_STATE_DATA_COPYING_COMPLETED: helper.apply_new_access_rules. 
assert_called_once_with( - new_instance) + new_instance, 'fake_id') self.assertTrue(manager.LOG.exception.called) @ddt.data({'mount_snapshot_support': True, 'snapshot_els': False}, @@ -5605,7 +5684,7 @@ class ShareManagerTestCase(test.TestCase): self.context, src_instance, dest_instance, [snapshot.instance], snapshot_mappings, src_server, dest_server) (migration_api.ShareMigrationHelper.apply_new_access_rules. - assert_called_once_with(dest_instance)) + assert_called_once_with(dest_instance, share['id'])) self.share_manager._migration_delete_instance.assert_called_once_with( self.context, src_instance['id']) self.share_manager.db.share_instance_update.assert_has_calls([ @@ -5683,7 +5762,7 @@ class ShareManagerTestCase(test.TestCase): self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}) (migration_api.ShareMigrationHelper.apply_new_access_rules. - assert_called_once_with(new_instance)) + assert_called_once_with(new_instance, 'fake_id')) delete_mock.assert_called_once_with(instance) @ddt.data(constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, @@ -5730,7 +5809,7 @@ class ShareManagerTestCase(test.TestCase): {'status': constants.STATUS_INACTIVE})) (helper.cleanup_new_instance.assert_called_once_with(instance_2)) (self.share_manager._reset_read_only_access_rules. - assert_called_once_with(self.context, share, instance_1['id'], + assert_called_once_with(self.context, instance_1['id'], helper=helper, supress_errors=False)) else: @@ -5787,7 +5866,7 @@ class ShareManagerTestCase(test.TestCase): # run self.share_manager._reset_read_only_access_rules( - self.context, share, instance['id'], supress_errors=supress_errors) + self.context, instance['id'], supress_errors=supress_errors) # asserts self.share_manager.db.share_server_get.assert_called_once_with( @@ -5795,14 +5874,15 @@ class ShareManagerTestCase(test.TestCase): self.share_manager.db.share_instance_update.assert_called_once_with( self.context, instance['id'], {'cast_rules_to_readonly': False}) - self.share_manager.db.share_instance_get.assert_called_once_with( - self.context, instance['id'], with_share_data=True) + self.share_manager.db.share_instance_get.assert_has_calls([ + mock.call(self.context, instance['id'], with_share_data=True), + mock.call(self.context, instance['id'], with_share_data=True)]) if supress_errors: (migration_api.ShareMigrationHelper.cleanup_access_rules. - assert_called_once_with(instance, server)) + assert_called_once_with([instance], server, None)) else: (migration_api.ShareMigrationHelper.revert_access_rules. - assert_called_once_with(instance, server)) + assert_called_once_with([instance], server, None)) def test__migration_delete_instance(self): @@ -5844,7 +5924,7 @@ class ShareManagerTestCase(test.TestCase): self.share_manager.db.share_instance_delete.assert_called_once_with( self.context, instance['id']) self.share_manager._check_delete_share_server.assert_called_once_with( - self.context, instance) + self.context, share_instance=instance) (self.share_manager.db.share_snapshot_instance_get_all_with_filters. 
assert_called_once_with(self.context, {'share_instance_ids': [instance['id']]})) @@ -7525,11 +7605,15 @@ class ShareManagerTestCase(test.TestCase): self.assertFalse(mock_db_delete_call.called) def test_update_access(self): - share_instance = fakes.fake_share_instance() + share_server = fakes.fake_share_server_get() + kwargs = {'share_server_id': share_server['id']} + share_instance = fakes.fake_share_instance(**kwargs) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value='fake_share_server')) self.mock_object(self.share_manager, '_get_share_instance', mock.Mock(return_value=share_instance)) + self.mock_object(self.share_manager.db, 'share_server_get', + mock.Mock(return_value=share_server)) access_rules_update_method = self.mock_object( self.share_manager.access_helper, 'update_access_rules') @@ -7539,7 +7623,7 @@ class ShareManagerTestCase(test.TestCase): self.assertIsNone(retval) access_rules_update_method.assert_called_once_with( self.context, share_instance['id'], - share_server='fake_share_server') + share_server=share_server) @mock.patch('manila.tests.fake_notifier.FakeNotifier._notify') def test_update_share_usage_size(self, mock_notify): @@ -7676,6 +7760,1124 @@ class ShareManagerTestCase(test.TestCase): self.assertDictEqual(expected_metadata, metadata) + @ddt.data( + { + 'compatible': False, + 'writable': True, + 'nondisruptive': True, + 'preserve_snapshots': True, + }, + { + 'compatible': True, + 'writable': False, + 'nondisruptive': True, + 'preserve_snapshots': True, + }, + { + 'compatible': True, + 'writable': True, + 'nondisruptive': False, + 'preserve_snapshots': True, + }, + { + 'compatible': True, + 'writable': True, + 'nondisruptive': True, + 'preserve_snapshots': False, + }, + { + 'compatible': True, + 'writable': True, + 'nondisruptive': True, + 'preserve_snapshots': False, + 'not_preserve_with_instances': True + }, + ) + @ddt.unpack + def test__validate_check_compatibility_result( + self, compatible, writable, nondisruptive, + preserve_snapshots, not_preserve_with_instances=False): + fake_share_network = db_utils.create_share_network() + fake_share_server = db_utils.create_share_server() + fake_share_server_dest = db_utils.create_share_server() + share_instances = [] + snapshot_instances = [ + db_utils.create_snapshot( + with_share=True, status='available')['instance']] + + driver_compatibility = { + 'compatible': compatible, + 'writable': writable, + 'preserve_snapshots': preserve_snapshots, + 'nondisruptive': nondisruptive, + 'share_network_id': fake_share_network['id'], + 'migration_cancel': False, + 'migration_get_progress': False + } + specified_writable = True if not writable else writable + specified_nondisruptive = True if not nondisruptive else nondisruptive + specified_preserve_snapshots = (True if not preserve_snapshots else + preserve_snapshots) + if not preserve_snapshots and not_preserve_with_instances: + specified_preserve_snapshots = False + + self.assertRaises( + exception.ShareServerMigrationFailed, + self.share_manager._validate_check_compatibility_result, + self.context, + fake_share_server['id'], + share_instances, + snapshot_instances, + driver_compatibility, + fake_share_server_dest['host'], + specified_nondisruptive, + specified_writable, + specified_preserve_snapshots, + resource_type='share server' + ) + + @ddt.data( + { + 'kwargs': {'share_instance_ids': ['fakeid1']}, + 'resource_type': 'share_instance' + }, + { + 'kwargs': {'snapshot_instance_ids': ['fakeid1']}, + 'resource_type': 'snapshot_instance' + }, + { + 
'kwargs': { + 'snapshot_instance_ids': ['fakeid1'], + 'task_state': constants.TASK_STATE_MIGRATION_STARTING}, + 'resource_type': 'snapshot_instance' + }, + ) + @ddt.unpack + def test__update_resource_status(self, kwargs, resource_type): + if resource_type == 'share_instance': + mock_db_instances_status_update = self.mock_object( + db, 'share_instances_status_update') + else: + mock_db_instances_status_update = self.mock_object( + db, 'share_snapshot_instances_status_update') + + kwargs_relationship = { + 'share_instance': 'share_instance_ids', + 'snapshot_instance': 'snapshot_instance_ids' + } + resource_ids_key = kwargs_relationship.get(resource_type) + resource_ids = kwargs.get(resource_ids_key) + fields = {'status': constants.STATUS_AVAILABLE} + if kwargs.get('task_state'): + fields['task_state'] = kwargs['task_state'] + + self.share_manager._update_resource_status( + self.context, constants.STATUS_AVAILABLE, **kwargs) + + mock_db_instances_status_update.assert_called_once_with( + self.context, resource_ids, fields) + + def _get_share_server_start_update_calls( + self, source_share_server, dest_share_server, driver_failed=False): + migration_in_progress_call = mock.call( + self.context, dest_share_server['id'], + { + 'status': constants.STATUS_SERVER_MIGRATING_TO, + 'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS, + 'source_share_server_id': source_share_server['id'] + } + ) + driver_migration_starting_src_call = mock.call( + self.context, source_share_server['id'], + {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_STARTING} + ) + driver_migration_starting_dest_call = mock.call( + self.context, dest_share_server['id'], + {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_STARTING} + ) + driver_migration_src_call = mock.call( + self.context, source_share_server['id'], + {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS} + ) + driver_migration_dest_call = mock.call( + self.context, dest_share_server['id'], + {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS} + ) + driver_migration_dest_error = mock.call( + self.context, dest_share_server['id'], + { + 'task_state': constants.TASK_STATE_MIGRATION_ERROR, + 'status': constants.STATUS_ERROR + } + ) + mock_calls = [ + migration_in_progress_call, driver_migration_starting_src_call, + driver_migration_starting_dest_call] + if driver_failed: + mock_calls.append(driver_migration_dest_error) + else: + mock_calls.append(driver_migration_src_call) + mock_calls.append(driver_migration_dest_call) + return mock_calls + + def _setup_server_migration_start_mocks( + self, fake_share_instances, fake_snap_instances, fake_old_network, + fake_new_network, fake_service, fake_request_spec, + fake_driver_result, fake_new_share_server): + self.mock_object(db, 'share_instances_get_all_by_share_server', + mock.Mock(return_value=fake_share_instances)) + self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', + mock.Mock(return_value=fake_snap_instances)) + self.mock_object(db, 'share_network_get', + mock.Mock(side_effect=[fake_old_network, + fake_new_network])) + self.mock_object(self.share_manager, '_update_resource_status') + self.mock_object(db, 'service_get_by_args', + mock.Mock(return_value=fake_service)) + self.mock_object(api.API, + 'get_share_server_migration_request_spec_dict', + mock.Mock(return_value=fake_request_spec)) + self.mock_object(self.share_manager.driver, + 'share_server_migration_check_compatibility', + mock.Mock(return_value=fake_driver_result)) + self.mock_object(self.share_manager, + 
'_validate_check_compatibility_result') + self.mock_object(self.share_manager, + '_provide_share_server_for_migration', + mock.Mock(return_value=fake_new_share_server)) + self.mock_object(self.share_manager, + '_cast_access_rules_to_readonly_for_server') + self.mock_object(db, 'share_server_update') + self.mock_object(self.share_manager.driver, + 'share_server_migration_start') + + @ddt.data(True, False) + def test__share_server_migration_start_driver(self, writable): + fake_old_share_server = db_utils.create_share_server() + fake_new_share_server = db_utils.create_share_server() + fake_old_network = db_utils.create_share_network() + fake_new_network = db_utils.create_share_network() + fake_share_instances = [ + db_utils.create_share( + share_server_id=fake_old_share_server['id'], + share_network_id=fake_old_network['id'])['instance']] + fake_share_instance_ids = [ + fake_instance['id'] for fake_instance in fake_share_instances] + fake_snap_instances = [] + fake_service = {'availability_zone_id': 'fake_az_id', + 'availability_zone': {'name': 'fake_az1'}} + fake_request_spec = {} + fake_dest_host = 'fakehost@fakebackend' + nondisruptive = False + preserve_snapshots = True + fake_driver_result = { + 'compatible': True, + 'writable': writable, + 'preserve_snapshots': preserve_snapshots, + 'nondisruptive': nondisruptive, + 'share_network_id': fake_new_network['id'], + 'migration_cancel': False, + 'migration_get_progress': False + } + + self._setup_server_migration_start_mocks( + fake_share_instances, fake_snap_instances, fake_old_network, + fake_new_network, fake_service, fake_request_spec, + fake_driver_result, fake_new_share_server) + + result = self.share_manager._share_server_migration_start_driver( + self.context, fake_old_share_server, fake_dest_host, writable, + nondisruptive, preserve_snapshots, fake_new_network['id'] + ) + + self.assertTrue(result) + db.share_instances_get_all_by_share_server.assert_called_once_with( + self.context, fake_old_share_server['id'], with_share_data=True) + (db.share_snapshot_instance_get_all_with_filters. + assert_called_once_with( + self.context, {'share_instance_ids': fake_share_instance_ids})) + db.share_network_get.assert_has_calls( + [mock.call(self.context, fake_old_network['id']), + mock.call(self.context, fake_new_network['id'])]) + db.service_get_by_args.assert_called_once_with( + self.context, fake_dest_host, 'manila-share') + (api.API.get_share_server_migration_request_spec_dict. + assert_called_once_with( + self.context, fake_share_instances, fake_snap_instances, + availability_zone_id=fake_service['availability_zone_id'], + share_network_id=fake_new_network['id'])) + (self.share_manager.driver.share_server_migration_check_compatibility. + assert_called_once_with( + self.context, fake_old_share_server, fake_dest_host, + fake_old_network, fake_new_network, fake_request_spec)) + (self.share_manager._validate_check_compatibility_result. + assert_called_once_with( + self.context, fake_old_share_server, fake_share_instances, + fake_snap_instances, fake_driver_result, fake_dest_host, + nondisruptive, writable, preserve_snapshots, + resource_type='share server')) + (self.share_manager._provide_share_server_for_migration. 
+ assert_called_once_with( + self.context, fake_old_share_server, fake_new_network['id'], + fake_service['availability_zone_id'], fake_dest_host)) + db.share_server_update.assert_has_calls( + self._get_share_server_start_update_calls( + fake_old_share_server, fake_new_share_server)) + (self.share_manager.driver.share_server_migration_start. + assert_called_once_with( + self.context, fake_old_share_server, fake_new_share_server, + fake_share_instances, fake_snap_instances)) + if not writable: + (self.share_manager._cast_access_rules_to_readonly_for_server. + assert_called_once_with( + self.context, fake_share_instances, fake_old_share_server, + fake_old_share_server['host'])) + else: + (self.share_manager._cast_access_rules_to_readonly_for_server. + assert_not_called()) + + def test__share_server_migration_start_driver_exception(self): + fake_old_share_server = db_utils.create_share_server() + fake_new_share_server = db_utils.create_share_server() + fake_old_network = db_utils.create_share_network() + fake_new_network = db_utils.create_share_network() + fake_share_instances = [ + db_utils.create_share( + share_server_id=fake_old_share_server['id'], + share_network_id=fake_old_network['id'])['instance']] + fake_share_instance_ids = [ + fake_instance['id'] for fake_instance in fake_share_instances] + fake_snap_instances = [] + fake_snap_instance_ids = [] + fake_service = {'availability_zone_id': 'fake_az_id', + 'availability_zone': {'name': 'fake_az1'}} + fake_request_spec = {} + fake_dest_host = 'fakehost@fakebackend' + nondisruptive = False + preserve_snapshots = True + writable = True + fake_driver_result = { + 'compatible': True, + 'writable': writable, + 'preserve_snapshots': preserve_snapshots, + 'nondisruptive': nondisruptive, + 'share_network_id': fake_new_network['id'], + 'migration_cancel': False, + 'migration_get_progress': False + } + + self._setup_server_migration_start_mocks( + fake_share_instances, fake_snap_instances, fake_old_network, + fake_new_network, fake_service, fake_request_spec, + fake_driver_result, fake_new_share_server) + mock__reset_read_only = self.mock_object( + self.share_manager, '_reset_read_only_access_rules_for_server') + + self.share_manager.driver.share_server_migration_start.side_effect = ( + Exception + ) + + self.assertRaises( + exception.ShareServerMigrationFailed, + self.share_manager._share_server_migration_start_driver, + self.context, fake_old_share_server, fake_dest_host, writable, + nondisruptive, preserve_snapshots, fake_new_network['id'] + ) + + db.share_instances_get_all_by_share_server.assert_called_once_with( + self.context, fake_old_share_server['id'], with_share_data=True) + (db.share_snapshot_instance_get_all_with_filters. + assert_called_once_with( + self.context, {'share_instance_ids': fake_share_instance_ids})) + db.share_network_get.assert_has_calls( + [mock.call(self.context, fake_old_network['id']), + mock.call(self.context, fake_new_network['id'])]) + self.share_manager._update_resource_status.assert_has_calls([ + mock.call( + self.context, constants.STATUS_AVAILABLE, + share_instance_ids=fake_share_instance_ids, + snapshot_instance_ids=fake_snap_instance_ids)]) + db.service_get_by_args.assert_called_once_with( + self.context, fake_dest_host, 'manila-share') + (api.API.get_share_server_migration_request_spec_dict. 
+ assert_called_once_with( + self.context, fake_share_instances, fake_snap_instances, + availability_zone_id=fake_service['availability_zone_id'], + share_network_id=fake_new_network['id'])) + (self.share_manager.driver.share_server_migration_check_compatibility. + assert_called_once_with( + self.context, fake_old_share_server, fake_dest_host, + fake_old_network, fake_new_network, fake_request_spec)) + (self.share_manager._validate_check_compatibility_result. + assert_called_once_with( + self.context, fake_old_share_server, fake_share_instances, + fake_snap_instances, fake_driver_result, fake_dest_host, + nondisruptive, writable, preserve_snapshots, + resource_type='share server')) + (self.share_manager._provide_share_server_for_migration. + assert_called_once_with( + self.context, fake_old_share_server, fake_new_network['id'], + fake_service['availability_zone_id'], fake_dest_host)) + db.share_server_update.assert_has_calls( + self._get_share_server_start_update_calls( + fake_old_share_server, fake_new_share_server, + driver_failed=True)) + (self.share_manager.driver.share_server_migration_start. + assert_called_once_with( + self.context, fake_old_share_server, fake_new_share_server, + fake_share_instances, fake_snap_instances)) + mock__reset_read_only.assert_called_once_with( + self.context, fake_share_instances, fake_old_share_server, + dest_host=fake_old_share_server['host'] + ) + + if not writable: + (self.share_manager._cast_access_rules_to_readonly_for_server. + assert_called_once_with( + self.context, fake_share_instances, fake_old_share_server, + fake_old_share_server['host'])) + else: + (self.share_manager._cast_access_rules_to_readonly_for_server. + assert_not_called()) + + @ddt.data(None, exception.ShareServerMigrationFailed) + def test_share_server_migration_check(self, check_action): + fake_share_server = db_utils.create_share_server() + fake_old_network = db_utils.create_share_network() + fake_new_network = db_utils.create_share_network() + fake_dest_host = 'fakehost@fakebackend' + fake_share_instances = [ + db_utils.create_share( + share_network_id=fake_old_network['id'])['instance']] + fake_share_instance_ids = [ + fake_instance['id'] for fake_instance in fake_share_instances] + fake_snap_instances = [] + fake_service = {'availability_zone_id': 'fake_az_id', + 'availability_zone': {'name': 'fake_az1'}} + fake_request_spec = {} + nondisruptive = False + writable = True + preserve_snapshots = True + fake_driver_result = { + 'compatible': True, + 'writable': writable, + 'preserve_snapshots': preserve_snapshots, + 'nondisruptive': nondisruptive, + 'share_network_id': fake_new_network['id'], + 'migration_cancel': False, + 'migration_get_progress': False + } + + mock_server_get = self.mock_object( + db, 'share_server_get', mock.Mock(return_value=fake_share_server)) + mock_get_server_instances = self.mock_object( + db, 'share_instances_get_all_by_share_server', + mock.Mock(return_value=fake_share_instances)) + mock_snap_instances_get = self.mock_object( + db, 'share_snapshot_instance_get_all_with_filters', + mock.Mock(return_value=fake_snap_instances)) + mock_sn_get = self.mock_object( + db, 'share_network_get', + mock.Mock(side_effect=[fake_old_network, fake_new_network])) + mock_service_get = self.mock_object( + db, 'service_get_by_args', mock.Mock(return_value=fake_service)) + mock_get_req_spec = self.mock_object( + api.API, 'get_share_server_migration_request_spec_dict', + mock.Mock(return_value=fake_request_spec)) + mock_driver_check = self.mock_object( + 
self.share_manager.driver, + 'share_server_migration_check_compatibility', + mock.Mock(return_value=fake_driver_result)) + mock__validate_check_compatibility = self.mock_object( + self.share_manager, '_validate_check_compatibility_result') + if isinstance(check_action, exception.ShareServerMigrationFailed): + mock__validate_check_compatibility.side_effect = ( + exception.ShareServerMigrationFailed) + fake_driver_result['compatible'] = False + + result = self.share_manager.share_server_migration_check( + self.context, fake_share_server['id'], fake_dest_host, True, False, + True, fake_new_network['id'] + ) + + self.assertEqual(fake_driver_result, result) + mock_server_get.assert_called_once_with( + self.context, fake_share_server['id']) + mock_get_server_instances.assert_called_once_with( + self.context, fake_share_server['id'], with_share_data=True + ) + mock_snap_instances_get.assert_called_once_with( + self.context, {'share_instance_ids': fake_share_instance_ids} + ) + mock_sn_get.assert_has_calls( + [mock.call(self.context, fake_old_network['id']), + mock.call(self.context, fake_new_network['id'])] + ) + mock_service_get.assert_called_once_with( + self.context, fake_dest_host, 'manila-share' + ) + mock_get_req_spec.assert_called_once_with( + self.context, fake_share_instances, fake_snap_instances, + availability_zone_id=fake_service['availability_zone_id'], + share_network_id=fake_new_network['id'] + ) + mock_driver_check.assert_called_once_with( + self.context, fake_share_server, fake_dest_host, fake_old_network, + fake_new_network, fake_request_spec + ) + mock__validate_check_compatibility.assert_called_once_with( + self.context, fake_share_server, fake_share_instances, + fake_snap_instances, fake_driver_result, fake_dest_host, + nondisruptive, writable, preserve_snapshots, + resource_type='share server' + ) + + def test_share_server_migration_check_dhss_false(self): + self.mock_object(self.share_manager, 'driver') + self.share_manager.driver.driver_handles_share_servers = False + expected = { + 'compatible': False, + 'writable': None, + 'preserve_snapshots': None, + 'nondisruptive': None, + 'share_network_id': 'new_share_network_id', + 'migration_cancel': None, + 'migration_get_progress': None + } + + result = self.share_manager.share_server_migration_check( + self.context, 'fake_share_server_id', 'fake_dest_host', + False, False, False, 'new_share_network_id' + ) + + self.assertEqual(expected, result) + + def test_share_server_migration_start(self): + fake_share_server = db_utils.create_share_server() + fake_share_network = db_utils.create_share_network() + fake_dest_host = 'fakehost@fakebackend' + writable = True + nondisruptive = True + preserve_snapshots = True + + mock_server_update = self.mock_object(db, 'share_server_update') + mock_server_get = self.mock_object( + db, 'share_server_get', mock.Mock(return_value=fake_share_server)) + mock__server_migration_start_driver = self.mock_object( + self.share_manager, '_share_server_migration_start_driver') + + self.share_manager.share_server_migration_start( + self.context, fake_share_server['id'], fake_dest_host, writable, + nondisruptive, preserve_snapshots, fake_share_network['id'] + ) + + mock_server_update.assert_called_once_with( + self.context, fake_share_server['id'], + {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS} + ) + mock_server_get.assert_called_once_with( + self.context, fake_share_server['id'] + ) + mock__server_migration_start_driver.assert_called_once_with( + self.context, fake_share_server, fake_dest_host,
writable, + nondisruptive, preserve_snapshots, fake_share_network['id'] + ) + + @ddt.data(True, False) + def test_share_server_migration_start_exception(self, dhss): + fake_share_server = db_utils.create_share_server() + fake_share_network = db_utils.create_share_network() + fake_dest_host = 'fakehost@fakebackend' + writable = True + nondisruptive = True + preserve_snapshots = True + self.mock_object(self.share_manager, 'driver') + self.share_manager.driver.driver_handles_share_servers = dhss + + mock_server_update = self.mock_object(db, 'share_server_update') + mock_server_get = self.mock_object( + db, 'share_server_get', mock.Mock(return_value=fake_share_server)) + mock__server_migration_start_driver = self.mock_object( + self.share_manager, '_share_server_migration_start_driver', + mock.Mock(side_effect=exception.ShareServerMigrationFailed)) + + self.share_manager.share_server_migration_start( + self.context, fake_share_server['id'], fake_dest_host, writable, + nondisruptive, preserve_snapshots, fake_share_network['id'] + ) + + mock_server_update.assert_has_calls([ + mock.call( + self.context, fake_share_server['id'], + {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}), + mock.call( + self.context, fake_share_server['id'], + {'task_state': constants.TASK_STATE_MIGRATION_ERROR, + 'status': constants.STATUS_ACTIVE} + ) + ]) + mock_server_get.assert_called_once_with( + self.context, fake_share_server['id'] + ) + if dhss: + mock__server_migration_start_driver.assert_called_once_with( + self.context, fake_share_server, fake_dest_host, writable, + nondisruptive, preserve_snapshots, fake_share_network['id'] + ) + + def _setup_migration_continue_mocks( + self, fake_share_servers, fake_share_instances, + fake_snapshot_instances): + self.mock_object( + db, 'share_server_get_all_by_host', + mock.Mock(return_value=fake_share_servers)) + self.mock_object( + db, 'share_instances_get_all_by_share_server', + mock.Mock(return_value=fake_share_instances)) + self.mock_object( + db, 'share_snapshot_instance_get_all_with_filters', + mock.Mock(return_value=fake_snapshot_instances)) + + @ddt.data(True, False) + def test_share_server_migration_continue(self, finished): + fake_src_share_servers = [ + db_utils.create_share_server( + status=constants.STATUS_SERVER_MIGRATING, + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)] + fake_dest_share_servers = [ + db_utils.create_share_server( + source_share_server_id=fake_src_share_servers[0]['id'], + status=constants.STATUS_SERVER_MIGRATING_TO, + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS + )] + fake_share_instances = [db_utils.create_share()['instance']] + fake_share_instance_ids = [ + instance['id'] for instance in fake_share_instances] + fake_cancelled_share_server = db_utils.create_share_server() + fake_snapshot_instances = [] + server_update_calls = [ + mock.call( + self.context, fake_src_share_servers[0]['id'], + { + 'task_state': + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE}), + mock.call( + self.context, fake_dest_share_servers[0]['id'], + { + 'task_state': + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE + }) + ] + + self._setup_migration_continue_mocks( + fake_dest_share_servers, fake_share_instances, + fake_snapshot_instances) + self.mock_object(db, 'share_server_get', + mock.Mock(side_effect=[fake_src_share_servers[0], + fake_cancelled_share_server])) + self.mock_object( + self.share_manager.driver, 'share_server_migration_continue', + mock.Mock(return_value=finished)) + self.mock_object(db,
'share_server_update') + + self.share_manager.share_server_migration_driver_continue( + self.context) + + db.share_server_get_all_by_host.assert_called_once_with( + self.context, self.share_manager.host, + filters={'status': constants.STATUS_SERVER_MIGRATING_TO} + ) + db.share_instances_get_all_by_share_server.assert_called_once_with( + self.context, fake_src_share_servers[0]['id'], + with_share_data=True + ) + (db.share_snapshot_instance_get_all_with_filters. + assert_called_once_with( + self.context, {'share_instance_ids': fake_share_instance_ids})) + (self.share_manager.driver.share_server_migration_continue. + assert_called_once_with( + self.context, fake_src_share_servers[0], + fake_dest_share_servers[0], + fake_share_instances, fake_snapshot_instances)) + if finished: + db.share_server_update.assert_has_calls(server_update_calls) + db.share_server_get.assert_called_once_with( + self.context, fake_src_share_servers[0]['id'] + ) + else: + db.share_server_get.assert_has_calls([ + mock.call(self.context, fake_src_share_servers[0]['id']), + mock.call(self.context, fake_src_share_servers[0]['id']), + ]) + + @ddt.data( + { + 'src_share_server_exists': False, + 'action_migration_continue': { + 'return_value': True + } + }, + { + 'src_share_server_exists': True, + 'action_migration_continue': { + 'side_effect': Exception + } + } + ) + @ddt.unpack + def test_share_server_migration_continue_exception( + self, src_share_server_exists, action_migration_continue): + fake_src_share_server = db_utils.create_share_server( + status=constants.STATUS_SERVER_MIGRATING, + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) + fake_dest_share_servers = [ + db_utils.create_share_server( + source_share_server_id=fake_src_share_server['id'], + status=constants.STATUS_SERVER_MIGRATING_TO, + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)] + fake_share_instances = [db_utils.create_share()['instance']] + fake_share_instance_ids = [ + instance['id'] for instance in fake_share_instances] + fake_snapshot_instances = [] + fake_snapshot_instance_ids = [] + + server_update_calls = [mock.call( + self.context, fake_dest_share_servers[0]['id'], + {'task_state': constants.TASK_STATE_MIGRATION_ERROR, + 'status': constants.STATUS_ERROR} + )] + if src_share_server_exists: + self.mock_object(db, 'share_server_get', + mock.Mock(return_value=fake_src_share_server)) + server_update_calls.append( + mock.call( + self.context, fake_src_share_server['id'], + { + 'task_state': constants.TASK_STATE_MIGRATION_ERROR, + 'status': constants.STATUS_ACTIVE + })) + else: + self.mock_object(db, 'share_server_get', + mock.Mock(return_value=None)) + + self._setup_migration_continue_mocks( + fake_dest_share_servers, fake_share_instances, + fake_snapshot_instances) + mock_server_update = self.mock_object(db, 'share_server_update') + self.mock_object( + self.share_manager.driver, 'share_server_migration_continue', + mock.Mock(**action_migration_continue) + ) + mock__update_resource_status = self.mock_object( + self.share_manager, '_update_resource_status') + mock__reset_read_only_access_rules = self.mock_object( + self.share_manager, '_reset_read_only_access_rules_for_server' + ) + + self.share_manager.share_server_migration_driver_continue( + self.context) + + db.share_server_get_all_by_host.assert_called_once_with( + self.context, self.share_manager.host, + filters={'status': constants.STATUS_SERVER_MIGRATING_TO}) + db.share_server_get.assert_called_once_with( + self.context, fake_src_share_server['id']) + if
src_share_server_exists: + db.share_instances_get_all_by_share_server.assert_called_once_with( + self.context, fake_src_share_server['id'], + with_share_data=True) + (db.share_snapshot_instance_get_all_with_filters. + assert_called_once_with( + self.context, + {'share_instance_ids': fake_share_instance_ids})) + mock__update_resource_status.assert_called_once_with( + self.context, constants.STATUS_AVAILABLE, + share_instance_ids=fake_share_instance_ids, + snapshot_instance_ids=fake_snapshot_instance_ids + ) + mock__reset_read_only_access_rules.assert_called_once_with( + self.context, fake_share_instances, fake_src_share_server, + dest_host=fake_src_share_server['host'] + ) + mock_server_update.assert_has_calls(server_update_calls) + + def _setup_server_migration_complete_mocks( + self, fake_source_share_server, fake_dest_share_server, + fake_share_instances, fake_snapshot_instances): + self.mock_object( + db, 'share_server_get', + mock.Mock(side_effect=[fake_dest_share_server, + fake_source_share_server])) + self.mock_object( + db, 'share_instances_get_all_by_share_server', + mock.Mock(return_value=fake_share_instances)) + self.mock_object( + db, 'share_snapshot_instance_get_all_with_filters', + mock.Mock(return_value=fake_snapshot_instances)) + self.mock_object( + self.share_manager, '_update_resource_status') + self.mock_object(db, 'share_server_update') + + def test_share_server_migration_complete_exception(self): + fake_source_share_server = db_utils.create_share_server() + fake_dest_share_server = db_utils.create_share_server() + fake_share_instances = [db_utils.create_share()['instance']] + fake_share_instance_ids = [ + instance['id'] for instance in fake_share_instances] + fake_snapshot_instances = [] + fake_snapshot_instance_ids = [] + + self._setup_server_migration_complete_mocks( + fake_source_share_server, fake_dest_share_server, + fake_share_instances, fake_snapshot_instances + ) + mock__server_migration_complete = self.mock_object( + self.share_manager, '_server_migration_complete_driver', + mock.Mock(side_effect=Exception)) + + self.assertRaises( + exception.ShareServerMigrationFailed, + self.share_manager.share_server_migration_complete, + self.context, fake_source_share_server['id'], + fake_dest_share_server['id'] + ) + db.share_server_get.assert_has_calls( + [mock.call(self.context, fake_dest_share_server['id']), + mock.call(self.context, fake_source_share_server['id'])] + ) + db.share_instances_get_all_by_share_server.assert_called_once_with( + self.context, fake_source_share_server['id'], with_share_data=True) + (db.share_snapshot_instance_get_all_with_filters.
+            assert_called_once_with(
+                self.context,
+                {'share_instance_ids': fake_share_instance_ids}))
+        mock__server_migration_complete.assert_called_once_with(
+            self.context, fake_source_share_server, fake_share_instances,
+            fake_snapshot_instances, fake_dest_share_server)
+        self.share_manager._update_resource_status.assert_called_once_with(
+            self.context, constants.STATUS_ERROR,
+            share_instance_ids=fake_share_instance_ids,
+            snapshot_instance_ids=fake_snapshot_instance_ids)
+        db.share_server_update.assert_has_calls([
+            mock.call(self.context, fake_source_share_server['id'],
+                      {'task_state': constants.TASK_STATE_MIGRATION_ERROR,
+                       'status': constants.STATUS_ERROR}),
+            mock.call(
+                self.context, fake_dest_share_server['id'],
+                {'task_state': constants.TASK_STATE_MIGRATION_ERROR,
+                 'status': constants.STATUS_ERROR})])
+
+    def test_share_server_migration_complete(self):
+        fake_source_share_server = db_utils.create_share_server()
+        fake_dest_share_server = db_utils.create_share_server()
+        fake_share_instances = [db_utils.create_share()['instance']]
+        fake_share_instance_ids = [
+            instance['id'] for instance in fake_share_instances]
+        fake_snapshot_instances = []
+        fake_snapshot_instance_ids = []
+
+        self._setup_server_migration_complete_mocks(
+            fake_source_share_server, fake_dest_share_server,
+            fake_share_instances, fake_snapshot_instances
+        )
+        mock__server_migration_complete = self.mock_object(
+            self.share_manager, '_server_migration_complete_driver')
+
+        self.share_manager.share_server_migration_complete(
+            self.context, fake_source_share_server['id'],
+            fake_dest_share_server['id'])
+        db.share_server_get.assert_has_calls(
+            [mock.call(self.context, fake_dest_share_server['id']),
+             mock.call(self.context, fake_source_share_server['id'])]
+        )
+        db.share_instances_get_all_by_share_server.assert_called_once_with(
+            self.context, fake_source_share_server['id'],
+            with_share_data=True)
+        (db.share_snapshot_instance_get_all_with_filters.
+            assert_called_once_with(
+                self.context,
+                {'share_instance_ids': fake_share_instance_ids}))
+        mock__server_migration_complete.assert_called_once_with(
+            self.context, fake_source_share_server, fake_share_instances,
+            fake_snapshot_instances, fake_dest_share_server)
+        self.share_manager._update_resource_status.assert_called_once_with(
+            self.context, constants.STATUS_AVAILABLE,
+            share_instance_ids=fake_share_instance_ids,
+            snapshot_instance_ids=fake_snapshot_instance_ids)
+        db.share_server_update.assert_called_once_with(
+            self.context, fake_dest_share_server['id'],
+            {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS,
+             'status': constants.STATUS_ACTIVE})
+
+    @ddt.data(
+        {'unmanage_source_server': False,
+         'snapshot_updates': {},
+         'share_updates': {}},
+        {'unmanage_source_server': True,
+         'snapshot_updates': {},
+         'share_updates': {}},
+    )
+    def test__server_migration_complete_driver(self, model_update):
+        fake_share_network = db_utils.create_share_network()
+        fake_share_network_subnet = db_utils.create_share_network_subnet(
+            share_network_id=fake_share_network['id'])
+        fake_source_share_server = db_utils.create_share_server()
+        fake_dest_share_server = db_utils.create_share_server(
+            share_network_subnet_id=fake_share_network_subnet['id'])
+        fake_share = db_utils.create_share()
+        fake_snapshot = db_utils.create_snapshot(share_id=fake_share['id'])
+        fake_service = {'availability_zone_id': 'fake_az_id',
+                        'availability_zone': {'name': 'fake_az1'}}
+        fake_share_instances = [fake_share['instance']]
+        fake_snapshot_instances = [fake_snapshot['instance']]
+        fake_share_instance_id = fake_share['instance']['id']
+        fake_allocation_data = {}
+        model_update['share_updates'][fake_share['instance']['id']] = {
+            'export_locations': {
+                "path": "10.10.10.31:/fake_mount_point",
+                "metadata": {
+                    "preferred": True,
+                },
+                "is_admin_only": False,
+            },
+            'pool_name': 'fakepool'
+        }
+        snapshot_el_update = {
+            "path": "10.10.10.31:/fake_snap_mount_point",
+            "is_admin_only": False,
+        }
+        model_update['snapshot_updates'][fake_snapshot['instance']['id']] = {
+            'export_locations': [snapshot_el_update]
+        }
+        fake_instance_update = {
+            'share_server_id': fake_dest_share_server['id'],
+            'host': fake_dest_share_server['host'] + '#fakepool',
+            'share_network_id': fake_share_network['id'],
+            'availability_zone_id': fake_service['availability_zone_id'],
+        }
+
+        mock_server_update = self.mock_object(db, 'share_server_update')
+        mock_network_get = self.mock_object(
+            db, 'share_network_get',
+            mock.Mock(return_value=fake_share_network))
+        mock_subnet_get = self.mock_object(
+            db, 'share_network_subnet_get',
+            mock.Mock(return_value=fake_share_network_subnet))
+        self.mock_object(
+            self.share_manager, '_form_server_setup_info',
+            mock.Mock(return_value=fake_allocation_data))
+        mock_server_migration_complete = self.mock_object(
+            self.share_manager.driver, 'share_server_migration_complete',
+            mock.Mock(return_value=model_update))
+        mock_service_get_by_args = self.mock_object(
+            db, 'service_get_by_args', mock.Mock(return_value=fake_service))
+        mock_instance_update = self.mock_object(db, 'share_instance_update')
+        mock_el_update = self.mock_object(db, 'share_export_locations_update')
+        mock_snap_el_update = self.mock_object(
+            db, 'share_snapshot_instance_export_locations_update')
+        mock_reset_access_rules = self.mock_object(
+            self.share_manager, '_reset_read_only_access_rules_for_server')
+        mock_unmanage_server = self.mock_object(
+            rpcapi.ShareAPI, 'unmanage_share_server')
+        mock_check_delete_server = self.mock_object(
+            self.share_manager, '_check_delete_share_server')
+
+        self.share_manager._server_migration_complete_driver(
+            self.context, fake_source_share_server, fake_share_instances,
+            fake_snapshot_instances, fake_dest_share_server)
+
+        mock_server_update.assert_has_calls(
+            [mock.call(
+                self.context, fake_source_share_server['id'],
+                {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}),
+             mock.call(
+                self.context, fake_dest_share_server['id'],
+                {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}),
+             mock.call(
+                self.context, fake_source_share_server['id'],
+                {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS,
+                 'status': constants.STATUS_INACTIVE})])
+        mock_network_get.assert_called_once_with(
+            self.context, fake_share_network['id'])
+        mock_subnet_get.assert_called_once_with(
+            self.context, fake_share_network_subnet['id'])
+        mock_server_migration_complete.assert_called_once_with(
+            self.context, fake_source_share_server, fake_dest_share_server,
+            fake_share_instances, fake_snapshot_instances,
+            fake_allocation_data
+        )
+        mock_service_get_by_args.assert_called_once_with(
+            self.context, fake_dest_share_server['host'], 'manila-share')
+        mock_instance_update.assert_called_once_with(
+            self.context, fake_share_instance_id, fake_instance_update)
+        mock_el_update.assert_called_once_with(
+            self.context, fake_share_instance_id,
+            model_update['share_updates'][fake_share_instance_id][
+                'export_locations'])
+        mock_snap_el_update.assert_called_once_with(
+            self.context, fake_snapshot['instance']['id'],
+            [snapshot_el_update]
+        )
+        mock_reset_access_rules.assert_called_once_with(
+            self.context, fake_share_instances, fake_source_share_server,
+            dest_host=fake_source_share_server['host'])
+        if model_update.get('unmanage_source_server') is True:
+            mock_unmanage_server.assert_called_once_with(
+                self.context, fake_source_share_server)
+        else:
+            mock_check_delete_server.assert_called_once_with(
+                self.context, share_server=fake_source_share_server,
+                remote_host=True
+            )
+
+    @ddt.data(constants.TASK_STATE_MIGRATION_SUCCESS,
+              constants.TASK_STATE_MIGRATION_IN_PROGRESS)
+    def test_server_migration_cancel_exception(self, task_state):
+        fake_source_share_server = db_utils.create_share_server(
+            task_state=task_state)
+        fake_dest_share_server = db_utils.create_share_server()
+
+        mock_server_get = self.mock_object(
+            db, 'share_server_get',
+            mock.Mock(side_effect=[fake_source_share_server,
+                                   fake_dest_share_server]))
+
+        self.assertRaises(
+            exception.InvalidShareServer,
+            self.share_manager.share_server_migration_cancel,
+            self.context, fake_source_share_server['id'],
+            fake_dest_share_server['id']
+        )
+
+        mock_server_get.assert_has_calls([
+            mock.call(self.context, fake_source_share_server['id']),
+            mock.call(self.context, fake_dest_share_server['id'])])
+
+    @ddt.data(
+        constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
+        constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)
+    def test_share_server_migration_cancel(self, task_state):
+        fake_source_share_server = db_utils.create_share_server(
+            task_state=task_state)
+        fake_dest_share_server = db_utils.create_share_server()
+        fake_share = db_utils.create_share()
+        fake_share_instances = [fake_share['instance']]
+        fake_share_instance_ids = [fake_share['instance']['id']]
+        fake_snapshot = db_utils.create_snapshot(share_id=fake_share['id'])
+        fake_snapshot_instances = [fake_snapshot['instance']]
+        fake_snapshot_instance_ids = [fake_snapshot['instance']['id']]
+
+        mock_server_get = self.mock_object(
+            db, 'share_server_get',
+            mock.Mock(side_effect=[fake_source_share_server,
+                                   fake_dest_share_server]))
+        mock_get_instances = self.mock_object(
+            db, 'share_instances_get_all_by_share_server',
+            mock.Mock(return_value=fake_share_instances))
+        mock_get_snap_instances = self.mock_object(
+            db, 'share_snapshot_instance_get_all_with_filters',
+            mock.Mock(return_value=fake_snapshot_instances))
+        mock_migration_cancel = self.mock_object(
+            self.share_manager.driver, 'share_server_migration_cancel')
+        mock_server_update = self.mock_object(db, 'share_server_update')
+        mock_check_delete_server = self.mock_object(
+            self.share_manager, '_check_delete_share_server')
+        mock_update_resource = self.mock_object(
+            self.share_manager, '_update_resource_status')
+        mock_reset_read_only_rules = self.mock_object(
+            self.share_manager, '_reset_read_only_access_rules_for_server')
+
+        self.share_manager.share_server_migration_cancel(
+            self.context, fake_source_share_server['id'],
+            fake_dest_share_server['id'])
+
+        mock_server_get.assert_has_calls([
+            mock.call(self.context, fake_source_share_server['id']),
+            mock.call(self.context, fake_dest_share_server['id'])])
+        mock_get_instances.assert_called_once_with(
+            self.context, fake_source_share_server['id'],
+            with_share_data=True)
+        mock_get_snap_instances.assert_called_once_with(
+            self.context, {'share_instance_ids': fake_share_instance_ids})
+        mock_migration_cancel.assert_called_once_with(
+            self.context, fake_source_share_server, fake_dest_share_server,
+            fake_share_instances, fake_snapshot_instances)
+        mock_server_update.assert_has_calls([
+            mock.call(
+                self.context, fake_dest_share_server['id'],
+                {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED,
+                 'status': constants.STATUS_INACTIVE}
+            ),
+            mock.call(
+                self.context, fake_source_share_server['id'],
+                {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED,
+                 'status': constants.STATUS_ACTIVE}
+            )
+        ])
+        mock_check_delete_server.assert_called_once_with(
+            self.context, share_server=fake_dest_share_server)
+        mock_update_resource.assert_called_once_with(
+            self.context, constants.STATUS_AVAILABLE,
+            share_instance_ids=fake_share_instance_ids,
+            snapshot_instance_ids=fake_snapshot_instance_ids)
+        mock_reset_read_only_rules.assert_called_once_with(
+            self.context, fake_share_instances, fake_source_share_server,
+            dest_host=fake_source_share_server['host'])
+
+    @ddt.data(
+        constants.TASK_STATE_MIGRATION_STARTING,
+        constants.TASK_STATE_MIGRATION_CANCELLED,
+    )
+    def test_migration_get_progress_exception(self, task_state):
+        fake_source_share_server = db_utils.create_share_server(
+            task_state=task_state)
+        fake_dest_share_server = db_utils.create_share_server()
+
+        self.mock_object(
+            db, 'share_server_get',
+            mock.Mock(side_effect=[fake_source_share_server,
+                                   fake_dest_share_server]))
+
+        self.assertRaises(
+            exception.InvalidShareServer,
+            self.share_manager.share_server_migration_get_progress,
+            self.context, fake_source_share_server['id'],
+            fake_dest_share_server['id']
+        )
+
+    def test_share_server_migration_get_progress(self):
+        fake_source_share_server = db_utils.create_share_server(
+            task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)
+        fake_dest_share_server = db_utils.create_share_server()
+        fake_progress = {"total_progress": 75}
+        fake_share = db_utils.create_share()
+        fake_share_instances = [fake_share['instance']]
+        fake_share_instance_ids = [fake_share['instance']['id']]
+        fake_snapshot = db_utils.create_snapshot(share_id=fake_share['id'])
+        fake_snapshot_instances = [fake_snapshot['instance']]
+
+        mock_server_get = self.mock_object(
+            db, 'share_server_get',
+            mock.Mock(side_effect=[fake_source_share_server,
+                                   fake_dest_share_server]))
+        mock_get_instances = self.mock_object(
+            db, 'share_instances_get_all_by_share_server',
+            mock.Mock(return_value=fake_share_instances))
+        mock_get_snap_instances = self.mock_object(
+            db, 'share_snapshot_instance_get_all_with_filters',
+            mock.Mock(return_value=fake_snapshot_instances))
+        mock_migration_get_progress = self.mock_object(
+            self.share_manager.driver, 'share_server_migration_get_progress',
+            mock.Mock(return_value=fake_progress))
+
+        self.share_manager.share_server_migration_get_progress(
+            self.context, fake_source_share_server['id'],
+            fake_dest_share_server['id'])
+
+        mock_get_instances.assert_called_once_with(
+            self.context, fake_source_share_server['id'],
+            with_share_data=True)
+        mock_get_snap_instances.assert_called_once_with(
+            self.context, {'share_instance_ids': fake_share_instance_ids})
+        mock_server_get.assert_has_calls([
+            mock.call(self.context, fake_source_share_server['id']),
+            mock.call(self.context, fake_dest_share_server['id'])])
+        mock_migration_get_progress.assert_called_once_with(
+            self.context, fake_source_share_server, fake_dest_share_server,
+            fake_share_instances, fake_snapshot_instances)
+
 
 @ddt.ddt
 class HookWrapperTestCase(test.TestCase):
diff --git a/manila/tests/share/test_migration.py b/manila/tests/share/test_migration.py
index a5269c919c..a3b3018364 100644
--- a/manila/tests/share/test_migration.py
+++ b/manila/tests/share/test_migration.py
@@ -25,6 +25,7 @@ from manila import exception
 from manila.share import access as access_helper
 from manila.share import api as share_api
 from manila.share import migration
+from manila.share import rpcapi as share_rpcapi
 from manila import test
 from manila.tests import db_utils
 from manila import utils
@@ -43,7 +44,7 @@ class ShareMigrationHelperTestCase(test.TestCase):
         self.access_helper = access_helper.ShareInstanceAccess(db, None)
         self.context = context.get_admin_context()
         self.helper = migration.ShareMigrationHelper(
-            self.context, db, self.share, self.access_helper)
+            self.context, db, self.access_helper)
 
     def test_delete_instance_and_wait(self):
 
@@ -250,10 +251,12 @@ class ShareMigrationHelperTestCase(test.TestCase):
         # asserts
         db.share_server_get.assert_called_with(self.context,
                                                'fake_server_id')
 
-    def test_revert_access_rules(self):
+    @ddt.data(None, 'fakehost@fakebackend')
+    def test_revert_access_rules(self, dest_host):
         share_instance = db_utils.create_share_instance(
             share_id=self.share['id'],
             status=constants.STATUS_AVAILABLE)
+        share_instance_ids = [instance['id'] for instance in [share_instance]]
 
         access = db_utils.create_access(share_id=self.share['id'],
                                         access_to='fake_ip',
@@ -266,16 +269,23 @@ class ShareMigrationHelperTestCase(test.TestCase):
         get_and_update_call = self.mock_object(
             self.access_helper, 'get_and_update_share_instance_access_rules',
             mock.Mock(return_value=[access]))
+        mock_update_access_for_instances = self.mock_object(
+            share_rpcapi.ShareAPI, 'update_access_for_instances')
 
         # run
-        self.helper.revert_access_rules(share_instance, server)
+        self.helper.revert_access_rules([share_instance], server,
+                                        dest_host=dest_host)
 
         # asserts
         get_and_update_call.assert_called_once_with(
             self.context, share_instance_id=share_instance['id'],
             updates={'state': constants.ACCESS_STATE_QUEUED_TO_APPLY})
-        self.access_helper.update_access_rules.assert_called_once_with(
-            self.context, share_instance['id'], share_server=server)
+        if dest_host:
+            mock_update_access_for_instances.assert_called_once_with(
+                self.context, dest_host, share_instance_ids, server)
+        else:
+            self.access_helper.update_access_rules.assert_called_once_with(
+                self.context, share_instance['id'], share_server=server)
 
     @ddt.data(True, False)
     def test_apply_new_access_rules_there_are_rules(self, prior_rules):
@@ -297,7 +307,8 @@ class ShareMigrationHelperTestCase(test.TestCase):
         self.mock_object(utils, 'wait_for_access_update')
 
         # run
-        self.helper.apply_new_access_rules(new_share_instance)
+        self.helper.apply_new_access_rules(new_share_instance,
+                                           self.share['id'])
 
         # asserts
         db.share_instance_access_copy.assert_called_once_with(
@@ -346,7 +357,7 @@ class ShareMigrationHelperTestCase(test.TestCase):
 
         # asserts
         self.helper.revert_access_rules.assert_called_once_with(
-            self.share_instance, server)
+            self.share_instance, server, None)
         if exc:
             self.assertEqual(1, migration.LOG.warning.call_count)
diff --git a/manila/tests/share/test_rpcapi.py b/manila/tests/share/test_rpcapi.py
index 85bd0641d1..9a73378077 100644
--- a/manila/tests/share/test_rpcapi.py
+++ b/manila/tests/share/test_rpcapi.py
@@ -44,10 +44,10 @@ class ShareRpcAPITestCase(test.TestCase):
             share_id='fake_share_id',
             host='fake_host',
         )
-        share_server = db_utils.create_share_server()
         share_group = {'id': 'fake_share_group_id', 'host': 'fake_host'}
         share_group_snapshot = {'id': 'fake_share_group_id'}
         host = 'fake_host'
+        share_server = db_utils.create_share_server(host=host)
         self.fake_share = jsonutils.to_primitive(share)
         # mock out the getattr on the share db model object since jsonutils
         # doesn't know about those extra attributes to pull in
@@ -115,11 +115,25 @@ class ShareRpcAPITestCase(test.TestCase):
         if 'snapshot_instance' in expected_msg:
             snapshot_instance = expected_msg.pop('snapshot_instance', None)
             expected_msg['snapshot_instance_id'] = snapshot_instance['id']
+        share_server_id_methods = [
+            'manage_share_server', 'unmanage_share_server',
+            'share_server_migration_start', 'share_server_migration_check']
+        src_dest_share_server_methods = [
+            'share_server_migration_cancel',
+            'share_server_migration_get_progress',
+            'share_server_migration_complete']
         if ('share_server' in expected_msg
-                and (method == 'manage_share_server')
-                or method == 'unmanage_share_server'):
+                and method in share_server_id_methods):
             share_server = expected_msg.pop('share_server', None)
             expected_msg['share_server_id'] = share_server['id']
+        if ('share_server' in expected_msg
+                and method in src_dest_share_server_methods):
+            share_server = expected_msg.pop('share_server', None)
+            expected_msg['src_share_server_id'] = share_server['id']
+        if ('dest_share_server' in expected_msg
+                and method in src_dest_share_server_methods):
+            share_server = expected_msg.pop('dest_share_server', None)
+            expected_msg['dest_share_server_id'] = share_server['id']
 
         if 'host' in kwargs:
             host = kwargs['host']
@@ -388,3 +402,58 @@ class ShareRpcAPITestCase(test.TestCase):
                              version='1.17',
                              snapshot_instance=self.fake_snapshot[
                                  'share_instance'])
+
+    def test_share_server_migration_start(self):
+        self._test_share_api('share_server_migration_start',
+                             rpc_method='cast',
+                             version='1.21',
+                             share_server=self.fake_share_server,
+                             dest_host=self.fake_host,
+                             writable=True,
+                             nondisruptive=False,
+                             preserve_snapshots=True,
+                             new_share_network_id='fake_share_network_id')
+
+    def test_share_server_migration_check(self):
+        self._test_share_api('share_server_migration_check',
+                             rpc_method='call',
+                             version='1.21',
+                             share_server_id=self.fake_share_server['id'],
+                             dest_host=self.fake_host,
+                             writable=True,
+                             nondisruptive=False,
+                             preserve_snapshots=True,
+                             new_share_network_id='fake_net_id')
+
+    def test_share_server_migration_cancel(self):
+        self._test_share_api('share_server_migration_cancel',
+                             rpc_method='cast',
+                             version='1.21',
+                             dest_host=self.fake_host,
+                             share_server=self.fake_share_server,
+                             dest_share_server=self.fake_share_server)
+
+    def test_share_server_migration_get_progress(self):
+        self._test_share_api('share_server_migration_get_progress',
+                             rpc_method='call',
+                             version='1.21',
+                             dest_host=self.fake_host,
+                             share_server=self.fake_share_server,
+                             dest_share_server=self.fake_share_server)
+
+    def test_share_server_migration_complete(self):
+        self._test_share_api('share_server_migration_complete',
+                             rpc_method='cast',
+                             version='1.21',
+                             dest_host=self.fake_host,
+                             share_server=self.fake_share_server,
+                             dest_share_server=self.fake_share_server)
+
+    def test_update_access_for_share_instances(self):
+        self._test_share_api(
+            'update_access_for_instances',
+            rpc_method='cast',
+            version='1.21',
+            dest_host=self.fake_host,
+            share_instance_ids=[self.fake_share['instance']['id']],
+            share_server_id=self.fake_share_server['id'])
diff --git a/releasenotes/notes/add-share-server-migration-51deb30212859277.yaml b/releasenotes/notes/add-share-server-migration-51deb30212859277.yaml
new file mode 100644
index 0000000000..72405b0dba
--- /dev/null
+++ b/releasenotes/notes/add-share-server-migration-51deb30212859277.yaml
@@ -0,0 +1,26 @@
+---
+features:
+  - |
+    Added the ability to migrate share servers within and across backends in
+    Manila. As designed in share migration, a two-phase approach is now
+    available for share servers, with the addition of a new API to check the
+    feasibility of a migration, called ``share-server-migration-check``. Now,
+    Manila can start, complete, cancel and retrieve the progress of a share
+    server migration. These operations were designed for administrators and
+    will work only when operating under ``driver_handles_share_servers=True``
+    mode. When starting a share server migration, it is possible to choose
+    which capabilities must be supported by the driver: remain ``writable``
+    during the first phase, ``preserve_snapshots``, be ``nondisruptive`` and
+    migrate to a different share network.
+upgrade:
+  - |
+    The share server entity now contains two new fields: ``task_state`` and
+    ``source_share_server_id``. The ``task_state`` field helps track the
+    migration progress of a share server. The ``source_share_server_id``
+    field will hold the source share server identification until the
+    migration gets completed or cancelled.
+    New statuses were added in order to control whether a share server, its
+    shares or snapshots are being migrated to a different location. Shares
+    belonging to a migrating share server will remain in the
+    ``server_migrating`` status while the migration is in progress. Once the
+    migration completes, their statuses are updated accordingly.
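
A note for reviewers trying this out: the sketch below shows how the new
share-server actions introduced at microversion 2.57 are intended to be
driven end to end. It is illustrative only and not part of the patch; the
action body keys, the experimental-API header, and every URL, token and ID
below are assumptions to verify against the api-ref.

    # Illustrative sketch (not part of this patch) of the two-phase share
    # server migration workflow over the REST API added at 2.57.
    import requests

    MANILA_URL = "http://controller:8786/v2/<project-id>"  # placeholder
    HEADERS = {
        "X-Auth-Token": "<token>",  # placeholder
        "X-OpenStack-Manila-API-Version": "2.57",
        # Assumed: share server migration ships as an experimental API.
        "X-OpenStack-Manila-API-Experimental": "True",
    }

    def server_action(server_id, body):
        # POST an action to a share server and return the parsed response.
        url = "%s/share-servers/%s/action" % (MANILA_URL, server_id)
        resp = requests.post(url, json=body, headers=HEADERS)
        resp.raise_for_status()
        return resp.json() if resp.content else {}

    src = "<share-server-id>"  # placeholder
    params = {
        "host": "otherhost@otherbackend",  # destination backend
        "writable": True,                  # stay writable during phase 1
        "nondisruptive": False,
        "preserve_snapshots": True,
        "new_share_network_id": None,      # or a different share network id
    }

    # 1) Ask whether the migration is feasible before touching anything.
    check = server_action(src, {"migration_check": params})

    # 2) First phase: start copying data; shares may remain writable.
    if check.get("compatible"):
        server_action(src, {"migration_start": params})

    # 3) Poll the driver-reported progress until phase 1 is done.
    progress = server_action(src, {"migration_get_progress": None})

    # 4) Cut over to the new share server (or cancel while still in phase 1).
    server_action(src, {"migration_complete": None})

While a migration is still in its first phase it can be abandoned with the
analogous "migration_cancel" action, and a stuck task_state can be cleared
with "reset_task_state", matching the manager paths exercised by the tests
above.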