Merge "Add share server migration"

commit 4036ff32bd
@@ -81,6 +81,8 @@ echo "MANILA_DATA_COPY_CHECK_HASH=${MANILA_DATA_COPY_CHECK_HASH:=True}" >> $loca
 
 # Share Migration CI tests migration_continue period task interval
 echo "MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL=${MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL:=1}" >> $localconf
+# Share Server Migration CI tests migration_continue period task interval
+echo "MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL=${MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL:=10}" >> $localconf
 
 MANILA_SERVICE_IMAGE_ENABLED=${MANILA_SERVICE_IMAGE_ENABLED:-False}
 DEFAULT_EXTRA_SPECS=${DEFAULT_EXTRA_SPECS:-"'snapshot_support=True create_share_from_snapshot_support=True'"}
@@ -237,6 +237,10 @@ function configure_manila {
         iniset $MANILA_CONF DEFAULT migration_driver_continue_update_interval $MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL
     fi
 
+    if ! [[ -z $MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL ]]; then
+        iniset $MANILA_CONF DEFAULT server_migration_driver_continue_update_interval $MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL
+    fi
+
     if ! [[ -z $MANILA_DATA_COPY_CHECK_HASH ]]; then
         iniset $MANILA_CONF DEFAULT check_hash $MANILA_DATA_COPY_CHECK_HASH
     fi
@@ -149,13 +149,20 @@ REST_API_VERSION_HISTORY = """
             operation as a percentage.
     * 2.55 - Share groups feature is no longer considered experimental.
     * 2.56 - Share replication feature is no longer considered experimental.
+    * 2.57 - Added Share server migration operations:
+        'share_server_migration_check'
+        'share_server_migration_cancel'
+        'share_server_migration_complete'
+        'share_server_migration_start'
+        'share_server_migration_get_progress'
+        'share_server_reset_task_state'
 """
 
 # The minimum and maximum versions of the API supported
 # The default api version request is defined to be the
 # minimum version of the API supported.
 _MIN_API_VERSION = "2.0"
-_MAX_API_VERSION = "2.56"
+_MAX_API_VERSION = "2.57"
 DEFAULT_API_VERSION = _MIN_API_VERSION
 
 
@@ -309,3 +309,8 @@ user documentation.
 2.56
 ----
 Share replication feature is no longer considered experimental.
+
+2.57
+----
+Added share server migration feature. A two-phase approach that migrates
+a share server and all its resources to a new host.
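Not part of the diff: as an illustration of the two-phase flow this history entry describes, the sketch below drives the new share-server actions over the REST API with the requests library. The endpoint, token, destination host and IDs are placeholder assumptions; the request bodies follow the parameters validated by the v2 controller later in this change.

# Illustrative two-phase share server migration via API microversion 2.57.
# Endpoint URL, token, host and IDs below are placeholders (assumptions).
import requests

MANILA_ENDPOINT = 'http://controller:8786/v2/<project-id>'
HEADERS = {
    'X-Auth-Token': '<keystone-token>',
    'X-OpenStack-Manila-API-Version': '2.57',
    'X-OpenStack-Manila-API-Experimental': 'True',
    'Content-Type': 'application/json',
}
server_id = '<share-server-id>'
url = '%s/share-servers/%s/action' % (MANILA_ENDPOINT, server_id)

# Optional dry run: ask whether the destination can satisfy the request.
check = requests.post(url, headers=HEADERS, json={'migration_check': {
    'host': 'destination@backend2',
    'writable': False,
    'nondisruptive': False,
    'preserve_snapshots': True,
}}).json()

# Phase 1: start the migration (202 Accepted), then poll its progress.
requests.post(url, headers=HEADERS, json={'migration_start': {
    'host': 'destination@backend2',
    'writable': False,
    'nondisruptive': False,
    'preserve_snapshots': True,
}})
progress = requests.post(
    url, headers=HEADERS, json={'migration_get_progress': None}).json()

# Phase 2: once task_state reports migration_driver_phase1_done, cut over.
requests.post(url, headers=HEADERS, json={'migration_complete': None})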
@@ -1204,6 +1204,7 @@ class AdminActionsMixin(object):
             constants.STATUS_ERROR_DELETING,
             constants.STATUS_MIGRATING,
             constants.STATUS_MIGRATING_TO,
+            constants.STATUS_SERVER_MIGRATING,
         ]),
         'replica_state': set([
             constants.REPLICA_STATE_ACTIVE,
@@ -20,6 +20,7 @@ from webob import exc
 
 from manila.api.openstack import wsgi
 from manila.api.v1 import share_servers
+from manila.api.views import share_server_migration as server_migration_views
 from manila.common import constants
 from manila.db import api as db_api
 from manila import exception
@@ -35,17 +36,13 @@ class ShareServerController(share_servers.ShareServerController,
                             wsgi.AdminActionsMixin):
     """The Share Server API V2 controller for the OpenStack API."""
 
+    def __init__(self):
+        super(ShareServerController, self).__init__()
+        self._migration_view_builder = server_migration_views.ViewBuilder()
+
     valid_statuses = {
-        'status': {
-            constants.STATUS_ACTIVE,
-            constants.STATUS_ERROR,
-            constants.STATUS_DELETING,
-            constants.STATUS_CREATING,
-            constants.STATUS_MANAGING,
-            constants.STATUS_UNMANAGING,
-            constants.STATUS_UNMANAGE_ERROR,
-            constants.STATUS_MANAGE_ERROR,
-        }
+        'status': set(constants.SHARE_SERVER_STATUSES),
+        'task_state': set(constants.SERVER_TASK_STATE_STATUSES),
     }
 
     def _update(self, context, id, update):
@@ -204,6 +201,183 @@ class ShareServerController(share_servers.ShareServerController,
 
         return identifier, host, share_network, driver_opts, network_subnet
 
+    @wsgi.Controller.api_version('2.57', experimental=True)
+    @wsgi.action("migration_start")
+    @wsgi.Controller.authorize
+    @wsgi.response(http_client.ACCEPTED)
+    def share_server_migration_start(self, req, id, body):
+        """Migrate a share server to the specified host."""
+        context = req.environ['manila.context']
+        try:
+            share_server = db_api.share_server_get(
+                context, id)
+        except exception.ShareServerNotFound as e:
+            raise exc.HTTPNotFound(explanation=e.msg)
+
+        params = body.get('migration_start')
+
+        if not params:
+            raise exc.HTTPBadRequest(explanation=_("Request is missing body."))
+
+        bool_params = ['writable', 'nondisruptive', 'preserve_snapshots']
+        mandatory_params = bool_params + ['host']
+
+        utils.check_params_exist(mandatory_params, params)
+        bool_param_values = utils.check_params_are_boolean(bool_params, params)
+
+        pool_was_specified = len(params['host'].split('#')) > 1
+
+        if pool_was_specified:
+            msg = _('The destination host can not contain pool information.')
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        new_share_network = None
+
+        new_share_network_id = params.get('new_share_network_id', None)
+        if new_share_network_id:
+            try:
+                new_share_network = db_api.share_network_get(
+                    context, new_share_network_id)
+            except exception.NotFound:
+                msg = _("Share network %s not "
+                        "found.") % new_share_network_id
+                raise exc.HTTPBadRequest(explanation=msg)
+
+        try:
+            self.share_api.share_server_migration_start(
+                context, share_server, params['host'],
+                bool_param_values['writable'],
+                bool_param_values['nondisruptive'],
+                bool_param_values['preserve_snapshots'],
+                new_share_network=new_share_network)
+        except exception.ServiceIsDown as e:
+            # NOTE(dviroel): user should check if the host is healthy
+            raise exc.HTTPBadRequest(explanation=e.msg)
+        except exception.InvalidShareServer as e:
+            # NOTE(dviroel): invalid share server meaning that some internal
+            # resource have a invalid state.
+            raise exc.HTTPConflict(explanation=e.msg)
+
+    @wsgi.Controller.api_version('2.57', experimental=True)
+    @wsgi.action("migration_complete")
+    @wsgi.Controller.authorize
+    def share_server_migration_complete(self, req, id, body):
+        """Invokes 2nd phase of share server migration."""
+        context = req.environ['manila.context']
+        try:
+            share_server = db_api.share_server_get(
+                context, id)
+        except exception.ShareServerNotFound as e:
+            raise exc.HTTPNotFound(explanation=e.msg)
+
+        try:
+            result = self.share_api.share_server_migration_complete(
+                context, share_server)
+        except (exception.InvalidShareServer,
+                exception.ServiceIsDown) as e:
+            raise exc.HTTPBadRequest(explanation=e.msg)
+
+        return self._migration_view_builder.migration_complete(req, result)
+
+    @wsgi.Controller.api_version('2.57', experimental=True)
+    @wsgi.action("migration_cancel")
+    @wsgi.Controller.authorize
+    @wsgi.response(http_client.ACCEPTED)
+    def share_server_migration_cancel(self, req, id, body):
+        """Attempts to cancel share migration."""
+        context = req.environ['manila.context']
+        try:
+            share_server = db_api.share_server_get(
+                context, id)
+        except exception.ShareServerNotFound as e:
+            raise exc.HTTPNotFound(explanation=e.msg)
+
+        try:
+            self.share_api.share_server_migration_cancel(context, share_server)
+        except (exception.InvalidShareServer,
+                exception.ServiceIsDown) as e:
+            raise exc.HTTPBadRequest(explanation=e.msg)
+
+    @wsgi.Controller.api_version('2.57', experimental=True)
+    @wsgi.action("migration_get_progress")
+    @wsgi.Controller.authorize
+    def share_server_migration_get_progress(self, req, id, body):
+        """Retrieve share server migration progress for a given share."""
+        context = req.environ['manila.context']
+        try:
+            result = self.share_api.share_server_migration_get_progress(
+                context, id)
+        except exception.ServiceIsDown as e:
+            raise exc.HTTPConflict(explanation=e.msg)
+        except exception.InvalidShareServer as e:
+            raise exc.HTTPBadRequest(explanation=e.msg)
+
+        return self._migration_view_builder.get_progress(req, result)
+
+    @wsgi.Controller.api_version('2.57', experimental=True)
+    @wsgi.action("reset_task_state")
+    @wsgi.Controller.authorize
+    def share_server_reset_task_state(self, req, id, body):
+        return self._reset_status(req, id, body, status_attr='task_state')
+
+    @wsgi.Controller.api_version('2.57', experimental=True)
+    @wsgi.action("migration_check")
+    @wsgi.Controller.authorize
+    def share_server_migration_check(self, req, id, body):
+        """Check if can migrate a share server to the specified host."""
+        context = req.environ['manila.context']
+        try:
+            share_server = db_api.share_server_get(
+                context, id)
+        except exception.ShareServerNotFound as e:
+            raise exc.HTTPNotFound(explanation=e.msg)
+
+        params = body.get('migration_check')
+
+        if not params:
+            raise exc.HTTPBadRequest(explanation=_("Request is missing body."))
+
+        bool_params = ['writable', 'nondisruptive', 'preserve_snapshots']
+        mandatory_params = bool_params + ['host']
+
+        utils.check_params_exist(mandatory_params, params)
+        bool_param_values = utils.check_params_are_boolean(bool_params, params)
+
+        pool_was_specified = len(params['host'].split('#')) > 1
+
+        if pool_was_specified:
+            msg = _('The destination host can not contain pool information.')
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        new_share_network = None
+        new_share_network_id = params.get('new_share_network_id', None)
+        if new_share_network_id:
+            try:
+                new_share_network = db_api.share_network_get(
+                    context, new_share_network_id)
+            except exception.NotFound:
+                msg = _("Share network %s not "
+                        "found.") % new_share_network_id
+                raise exc.HTTPBadRequest(explanation=msg)
+
+        try:
+            result = self.share_api.share_server_migration_check(
+                context, share_server, params['host'],
+                bool_param_values['writable'],
+                bool_param_values['nondisruptive'],
+                bool_param_values['preserve_snapshots'],
+                new_share_network=new_share_network)
+        except exception.ServiceIsDown as e:
+            # NOTE(dviroel): user should check if the host is healthy
+            raise exc.HTTPBadRequest(explanation=e.msg)
+        except exception.InvalidShareServer as e:
+            # NOTE(dviroel): invalid share server meaning that some internal
+            # resource have a invalid state.
+            raise exc.HTTPConflict(explanation=e.msg)
+
+        return self._migration_view_builder.build_check_migration(
+            req, params, result)
+
 
 def create_resource():
     return wsgi.Resource(ShareServerController())
manila/api/views/share_server_migration.py (new file, 78 lines)
@@ -0,0 +1,78 @@
+# Copyright (c) 2020 NetApp, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from manila.api import common
+
+
+class ViewBuilder(common.ViewBuilder):
+    """Model share server migration view data response as a python dictionary.
+
+    """
+
+    _collection_name = 'share_server_migration'
+    _detail_version_modifiers = []
+
+    def get_progress(self, request, params):
+        """View of share server migration job progress."""
+        result = {
+            'total_progress': params['total_progress'],
+            'task_state': params['task_state'],
+            'destination_share_server_id':
+                params['destination_share_server_id'],
+        }
+
+        self.update_versioned_resource_dict(request, result, params)
+        return result
+
+    def build_check_migration(self, request, params, result):
+        """View of share server migration check."""
+        requested_capabilities = {
+            'writable': params['writable'],
+            'nondisruptive': params['nondisruptive'],
+            'preserve_snapshots': params['preserve_snapshots'],
+            'share_network_id': params['new_share_network_id'],
+            'host': params['host'],
+        }
+        supported_capabilities = {
+            'writable': result['writable'],
+            'nondisruptive': result['nondisruptive'],
+            'preserve_snapshots': result['preserve_snapshots'],
+            'share_network_id': result['share_network_id'],
+            'migration_cancel': result['migration_cancel'],
+            'migration_get_progress': result['migration_get_progress']
+        }
+        view = {
+            'compatible': result['compatible'],
+            'requested_capabilities': requested_capabilities,
+            'supported_capabilities': supported_capabilities,
+        }
+        capabilities = {
+            'requested': copy.copy(params),
+            'supported': copy.copy(result)
+        }
+        self.update_versioned_resource_dict(request, view, capabilities)
+        return view
+
+    def migration_complete(self, request, params):
+        """View of share server migration complete command."""
+        result = {
+            'destination_share_server_id':
+                params['destination_share_server_id'],
+        }
+
+        self.update_versioned_resource_dict(request, result, params)
+        return result
@@ -22,7 +22,8 @@ class ViewBuilder(common.ViewBuilder):
     _collection_name = 'share_servers'
     _detail_version_modifiers = [
         "add_is_auto_deletable_and_identifier_fields",
-        "add_share_network_subnet_id_field"
+        "add_share_network_subnet_id_field",
+        "add_task_state_and_source_server_fields"
     ]
 
     def build_share_server(self, request, share_server):
@@ -74,3 +75,10 @@ class ViewBuilder(common.ViewBuilder):
         share_server_dict['is_auto_deletable'] = (
             share_server['is_auto_deletable'])
         share_server_dict['identifier'] = share_server['identifier']
+
+    @common.ViewBuilder.versioned_method("2.57")
+    def add_task_state_and_source_server_fields(
+            self, context, share_server_dict, share_server):
+        share_server_dict['task_state'] = share_server['task_state']
+        share_server_dict['source_share_server_id'] = (
+            share_server['source_share_server_id'])
@@ -62,6 +62,10 @@ STATUS_NEW = 'new'
 STATUS_OUT_OF_SYNC = 'out_of_sync'
 STATUS_ACTIVE = 'active'
 
+# Share server migration statuses
+STATUS_SERVER_MIGRATING = 'server_migrating'
+STATUS_SERVER_MIGRATING_TO = 'server_migrating_to'
+
 ACCESS_RULES_STATES = (
     ACCESS_STATE_QUEUED_TO_APPLY,
     ACCESS_STATE_QUEUED_TO_DENY,
@@ -71,16 +75,18 @@ ACCESS_RULES_STATES = (
     ACCESS_STATE_ERROR,
     ACCESS_STATE_DELETED,
 )
-
+# Share and share server migration task states
 TASK_STATE_MIGRATION_STARTING = 'migration_starting'
 TASK_STATE_MIGRATION_IN_PROGRESS = 'migration_in_progress'
 TASK_STATE_MIGRATION_COMPLETING = 'migration_completing'
 TASK_STATE_MIGRATION_SUCCESS = 'migration_success'
 TASK_STATE_MIGRATION_ERROR = 'migration_error'
 TASK_STATE_MIGRATION_CANCELLED = 'migration_cancelled'
+TASK_STATE_MIGRATION_CANCEL_IN_PROGRESS = 'migration_cancel_in_progress'
 TASK_STATE_MIGRATION_DRIVER_STARTING = 'migration_driver_starting'
 TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS = 'migration_driver_in_progress'
 TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE = 'migration_driver_phase1_done'
+# Share statuses used by data service and host assisted migration
 TASK_STATE_DATA_COPYING_STARTING = 'data_copying_starting'
 TASK_STATE_DATA_COPYING_IN_PROGRESS = 'data_copying_in_progress'
 TASK_STATE_DATA_COPYING_COMPLETING = 'data_copying_completing'
@@ -113,6 +119,7 @@ TRANSITIONAL_STATUSES = (
     STATUS_EXTENDING, STATUS_SHRINKING,
     STATUS_MIGRATING, STATUS_MIGRATING_TO,
     STATUS_RESTORING, STATUS_REVERTING,
+    STATUS_SERVER_MIGRATING, STATUS_SERVER_MIGRATING_TO,
 )
 
 INVALID_SHARE_INSTANCE_STATUSES_FOR_ACCESS_RULE_UPDATES = (
@@ -182,6 +189,33 @@ TASK_STATE_STATUSES = (
     None,
 )
 
+SERVER_TASK_STATE_STATUSES = (
+    TASK_STATE_MIGRATION_STARTING,
+    TASK_STATE_MIGRATION_IN_PROGRESS,
+    TASK_STATE_MIGRATION_COMPLETING,
+    TASK_STATE_MIGRATION_SUCCESS,
+    TASK_STATE_MIGRATION_ERROR,
+    TASK_STATE_MIGRATION_CANCEL_IN_PROGRESS,
+    TASK_STATE_MIGRATION_CANCELLED,
+    TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
+    TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
+    None,
+)
+
+SHARE_SERVER_STATUSES = (
+    STATUS_ACTIVE,
+    STATUS_ERROR,
+    STATUS_DELETING,
+    STATUS_CREATING,
+    STATUS_MANAGING,
+    STATUS_UNMANAGING,
+    STATUS_UNMANAGE_ERROR,
+    STATUS_MANAGE_ERROR,
+    STATUS_INACTIVE,
+    STATUS_SERVER_MIGRATING,
+    STATUS_SERVER_MIGRATING_TO,
+)
 
 REPLICA_STATE_ACTIVE = 'active'
 REPLICA_STATE_IN_SYNC = 'in_sync'
 REPLICA_STATE_OUT_OF_SYNC = 'out_of_sync'
@@ -324,6 +324,21 @@ def share_instance_update(context, instance_id, values, with_share_data=False):
                                      with_share_data=with_share_data)
 
 
+def share_and_snapshot_instances_status_update(
+        context, values, share_instance_ids=None, snapshot_instance_ids=None,
+        current_expected_status=None):
+    return IMPL.share_and_snapshot_instances_status_update(
+        context, values, share_instance_ids=share_instance_ids,
+        snapshot_instance_ids=snapshot_instance_ids,
+        current_expected_status=current_expected_status)
+
+
+def share_instances_status_update(context, share_instance_ids, values):
+    """Updates the status of a bunch of share instances at once."""
+    return IMPL.share_instances_status_update(
+        context, share_instance_ids, values)
+
+
 def share_instances_host_update(context, current_host, new_host):
     """Update the host attr of all share instances that are on current_host."""
     return IMPL.share_instances_host_update(context, current_host, new_host)
@@ -334,10 +349,11 @@ def share_instances_get_all(context, filters=None):
     return IMPL.share_instances_get_all(context, filters=filters)
 
 
-def share_instances_get_all_by_share_server(context, share_server_id):
+def share_instances_get_all_by_share_server(context, share_server_id,
+                                            with_share_data=False):
     """Returns all share instances with given share_server_id."""
-    return IMPL.share_instances_get_all_by_share_server(context,
-                                                        share_server_id)
+    return IMPL.share_instances_get_all_by_share_server(
+        context, share_server_id, with_share_data=with_share_data)
 
 
 def share_instances_get_all_by_host(context, host, with_share_data=False,
@@ -517,6 +533,13 @@ def share_snapshot_instance_update(context, instance_id, values):
     return IMPL.share_snapshot_instance_update(context, instance_id, values)
 
 
+def share_snapshot_instances_status_update(
+        context, snapshot_instance_ids, values):
+    """Updates the status of a bunch of share snapshot instances at once."""
+    return IMPL.share_snapshot_instances_status_update(
+        context, snapshot_instance_ids, values)
+
+
 def share_snapshot_instance_create(context, snapshot_id, values):
     """Create a share snapshot instance for an existing snapshot."""
     return IMPL.share_snapshot_instance_create(
@@ -659,6 +682,13 @@ def share_snapshot_instance_export_location_create(context, values):
     return IMPL.share_snapshot_instance_export_location_create(context, values)
 
 
+def share_snapshot_instance_export_locations_update(
+        context, share_snapshot_instance_id, export_locations, delete=True):
+    """Update export locations of a share instance."""
+    return IMPL.share_snapshot_instance_export_locations_update(
+        context, share_snapshot_instance_id, export_locations, delete=delete)
+
+
 def share_snapshot_instance_export_locations_get_all(
         context, share_snapshot_instance_id):
     """Get the share snapshot instance export locations for given id."""
@@ -974,9 +1004,14 @@ def share_server_get_all(context):
     return IMPL.share_server_get_all(context)
 
 
-def share_server_get_all_by_host(context, host):
+def share_server_get_all_with_filters(context, filters):
+    """Get all share servers that match with the specified filters."""
+    return IMPL.share_server_get_all_with_filters(context, filters)
+
+
+def share_server_get_all_by_host(context, host, filters=None):
     """Get all share servers related to particular host."""
-    return IMPL.share_server_get_all_by_host(context, host)
+    return IMPL.share_server_get_all_by_host(context, host, filters=filters)
 
 
 def share_server_get_all_unused_deletable(context, host, updated_before):
@@ -0,0 +1,64 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Add task_state field for share servers table
+
+Revision ID: 5aa813ae673d
+Revises: e6d88547b381
+Create Date: 2020-06-23 12:04:47.821793
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '5aa813ae673d'
+down_revision = 'e6d88547b381'
+
+from alembic import op
+from oslo_log import log
+import sqlalchemy as sa
+
+
+LOG = log.getLogger(__name__)
+
+
+share_servers_fk_name = (
+    "fk_share_servers_source_share_server_id")
+
+
+def upgrade():
+
+    try:
+        op.add_column('share_servers', sa.Column(
+            'task_state', sa.String(length=255), default=None))
+        op.add_column(
+            'share_servers', sa.Column(
+                'source_share_server_id', sa.String(length=36),
+                sa.ForeignKey('share_servers.id', name=share_servers_fk_name),
+                default=None,
+                nullable=True))
+
+    except Exception:
+        LOG.error("Column share_servers.task_state and/or "
+                  "share_server.source_share_server_id not created!")
+        raise
+
+
+def downgrade():
+    try:
+        op.drop_column('share_servers', 'task_state')
+        op.drop_constraint(share_servers_fk_name, 'share_servers',
+                           type_='foreignkey')
+        op.drop_column('share_servers', 'source_share_server_id')
+    except Exception:
+        LOG.error("Column share_servers.task_state and/or "
+                  "share_servers.source_share_server_id not dropped!")
+        raise
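Not part of the diff: a minimal sketch for sanity-checking this schema migration after it has been applied, using SQLAlchemy's inspector. The database URL is an illustrative assumption.

# Verify the columns added by revision 5aa813ae673d (illustrative check only;
# the connection URL is an assumption, not something defined in this change).
import sqlalchemy as sa

engine = sa.create_engine('mysql+pymysql://manila:secret@localhost/manila')
inspector = sa.inspect(engine)

columns = {col['name'] for col in inspector.get_columns('share_servers')}
assert 'task_state' in columns
assert 'source_share_server_id' in columns

fks = inspector.get_foreign_keys('share_servers')
assert any(fk.get('name') == 'fk_share_servers_source_share_server_id'
           for fk in fks)
print('share_servers carries the new migration columns')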
@@ -1425,6 +1425,68 @@ def share_instance_update(context, share_instance_id, values,
     return instance_ref
 
 
+def share_and_snapshot_instances_status_update(
+        context, values, share_instance_ids=None, snapshot_instance_ids=None,
+        current_expected_status=None):
+    updated_share_instances = None
+    updated_snapshot_instances = None
+    session = get_session()
+    with session.begin():
+        if current_expected_status and share_instance_ids:
+            filters = {'instance_ids': share_instance_ids}
+            share_instances = share_instances_get_all(
+                context, filters=filters, session=session)
+            all_instances_are_compliant = all(
+                instance['status'] == current_expected_status
+                for instance in share_instances)
+
+            if not all_instances_are_compliant:
+                msg = _('At least one of the shares is not in the %(status)s '
+                        'status.') % {
+                    'status': current_expected_status
+                }
+                raise exception.InvalidShareInstance(reason=msg)
+
+        if current_expected_status and snapshot_instance_ids:
+            filters = {'instance_ids': snapshot_instance_ids}
+            snapshot_instances = share_snapshot_instance_get_all_with_filters(
+                context, filters, session=session)
+            all_snap_instances_are_compliant = all(
+                snap_instance['status'] == current_expected_status
+                for snap_instance in snapshot_instances)
+            if not all_snap_instances_are_compliant:
+                msg = _('At least one of the snapshots is not in the '
+                        '%(status)s status.') % {
+                    'status': current_expected_status
+                }
+                raise exception.InvalidShareSnapshotInstance(reason=msg)
+
+        if share_instance_ids:
+            updated_share_instances = share_instances_status_update(
+                context, share_instance_ids, values, session=session)
+
+        if snapshot_instance_ids:
+            updated_snapshot_instances = (
+                share_snapshot_instances_status_update(
+                    context, snapshot_instance_ids, values, session=session))
+
+    return updated_share_instances, updated_snapshot_instances
+
+
+@require_context
+def share_instances_status_update(
+        context, share_instance_ids, values, session=None):
+    session = session or get_session()
+
+    result = (
+        model_query(
+            context, models.ShareInstance, read_deleted="no",
+            session=session).filter(
+                models.ShareInstance.id.in_(share_instance_ids)).update(
+                    values, synchronize_session=False))
+    return result
+
+
 def _share_instance_update(context, share_instance_id, values, session):
     share_instance_ref = share_instance_get(context, share_instance_id,
                                             session=session)
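Not part of the diff: a caller-side sketch of the new status-update helper exposed through manila.db.api. The admin context and the instance IDs are assumptions for illustration; in the real flow the share manager builds these lists while preparing a server migration.

# Illustrative use of share_and_snapshot_instances_status_update(); the IDs
# below are placeholders, not values from this change.
from manila.common import constants
from manila import context as manila_context
from manila.db import api as db_api

ctxt = manila_context.get_admin_context()

# Move a group of share and snapshot instances to 'server_migrating' in one
# transaction, but only if every one of them is currently 'available'.
db_api.share_and_snapshot_instances_status_update(
    ctxt,
    {'status': constants.STATUS_SERVER_MIGRATING},
    share_instance_ids=['<share-instance-uuid-1>', '<share-instance-uuid-2>'],
    snapshot_instance_ids=['<snapshot-instance-uuid-1>'],
    current_expected_status=constants.STATUS_AVAILABLE)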
@@ -1457,8 +1519,8 @@ def share_instance_get(context, share_instance_id, session=None,
 
 
 @require_admin_context
-def share_instances_get_all(context, filters=None):
-    session = get_session()
+def share_instances_get_all(context, filters=None, session=None):
+    session = session or get_session()
     query = model_query(
         context, models.ShareInstance, session=session, read_deleted="no",
     ).options(
@@ -1483,6 +1545,10 @@ def share_instances_get_all(context, filters=None):
                 models.ShareInstanceExportLocations.uuid ==
                 export_location_id)
 
+        instance_ids = filters.get('instance_ids')
+        if instance_ids:
+            query = query.filter(models.ShareInstance.id.in_(instance_ids))
+
     # Returns list of share instances that satisfy filters.
     query = query.all()
     return query
@@ -1612,13 +1678,19 @@ def share_instances_get_all_by_share_network(context, share_network_id):
 
 
 @require_context
-def share_instances_get_all_by_share_server(context, share_server_id):
+def share_instances_get_all_by_share_server(context, share_server_id,
+                                            with_share_data=False):
     """Returns list of share instance with given share server."""
+    session = get_session()
     result = (
         model_query(context, models.ShareInstance).filter(
             models.ShareInstance.share_server_id == share_server_id,
         ).all()
     )
+
+    if with_share_data:
+        result = _set_instances_share_data(context, result, session)
+
     return result
 
 
@@ -2738,6 +2810,21 @@ def share_snapshot_update(context, snapshot_id, values):
 
     return snapshot_ref
 
 
+@require_context
+def share_snapshot_instances_status_update(
+        context, snapshot_instance_ids, values, session=None):
+    session = session or get_session()
+
+    result = (
+        model_query(
+            context, models.ShareSnapshotInstance,
+            read_deleted="no", session=session).filter(
+                models.ShareSnapshotInstance.id.in_(snapshot_instance_ids)
+            ).update(values, synchronize_session=False))
+
+    return result
+
+
 #################################
 
 
@@ -2974,9 +3061,10 @@ def share_snapshot_export_locations_get(context, snapshot_id):
 
 @require_context
 def share_snapshot_instance_export_locations_get_all(
-        context, share_snapshot_instance_id):
+        context, share_snapshot_instance_id, session=None):
 
-    session = get_session()
+    if not session:
+        session = get_session()
     export_locations = _share_snapshot_instance_export_locations_get_query(
         context, session,
         {'share_snapshot_instance_id': share_snapshot_instance_id}).all()
@@ -3009,6 +3097,82 @@ def share_snapshot_instance_export_location_delete(context, el_id):
 
     el.soft_delete(session=session)
 
 
+@require_context
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+def share_snapshot_instance_export_locations_update(
+        context, share_snapshot_instance_id, export_locations, delete):
+    # NOTE(dviroel): Lets keep this backward compatibility for driver that
+    # may still return export_locations as string
+    if not isinstance(export_locations, (list, tuple, set)):
+        export_locations = (export_locations, )
+    export_locations_as_dicts = []
+    for el in export_locations:
+        export_location = el
+        if isinstance(el, six.string_types):
+            export_location = {
+                "path": el,
+                "is_admin_only": False,
+            }
+        elif not isinstance(export_location, dict):
+            raise exception.ManilaException(
+                _("Wrong export location type '%s'.") % type(export_location))
+        export_locations_as_dicts.append(export_location)
+    export_locations = export_locations_as_dicts
+
+    export_locations_paths = [el['path'] for el in export_locations]
+
+    session = get_session()
+
+    current_el_rows = share_snapshot_instance_export_locations_get_all(
+        context, share_snapshot_instance_id, session=session)
+
+    def get_path_list_from_rows(rows):
+        return set([row['path'] for row in rows])
+
+    current_el_paths = get_path_list_from_rows(current_el_rows)
+
+    def create_indexed_time_dict(key_list):
+        base = timeutils.utcnow()
+        return {
+            # NOTE(u_glide): Incrementing timestamp by microseconds to make
+            # timestamp order match index order.
+            key: base + datetime.timedelta(microseconds=index)
+            for index, key in enumerate(key_list)
+        }
+
+    indexed_update_time = create_indexed_time_dict(export_locations_paths)
+
+    for el in current_el_rows:
+        if delete and el['path'] not in export_locations_paths:
+            el.soft_delete(session)
+        else:
+            updated_at = indexed_update_time[el['path']]
+            el.update({
+                'updated_at': updated_at,
+            })
+            el.save(session=session)
+
+    # Now add new export locations
+    for el in export_locations:
+        if el['path'] in current_el_paths:
+            # Already updated
+            continue
+
+        location_ref = models.ShareSnapshotInstanceExportLocation()
+        location_ref.update({
+            'id': uuidutils.generate_uuid(),
+            'path': el['path'],
+            'share_snapshot_instance_id': share_snapshot_instance_id,
+            'updated_at': indexed_update_time[el['path']],
+            'is_admin_only': el.get('is_admin_only', False),
+        })
+        location_ref.save(session=session)
+
+    return get_path_list_from_rows(
+        share_snapshot_instance_export_locations_get_all(
+            context, share_snapshot_instance_id, session=session))
+
+
 #################################
 
 
@@ -3816,8 +3980,28 @@ def share_server_get_all(context):
 
 
 @require_context
-def share_server_get_all_by_host(context, host):
-    return _server_get_query(context).filter_by(host=host).all()
+def share_server_get_all_with_filters(context, filters):
+
+    query = _server_get_query(context)
+
+    if filters.get('host'):
+        query = query.filter_by(host=filters.get('host'))
+    if filters.get('status'):
+        query = query.filter_by(status=filters.get('status'))
+    if filters.get('source_share_server_id'):
+        query = query.filter_by(
+            source_share_server_id=filters.get('source_share_server_id'))
+
+    return query.all()
+
+
+@require_context
+def share_server_get_all_by_host(context, host, filters=None):
+    if filters:
+        filters.update({'host': host})
+    else:
+        filters = {'host': host}
+    return share_server_get_all_with_filters(context, filters=filters)
 
 
 @require_context
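Not part of the diff: a short usage sketch of the new filter support (the context and IDs are illustrative). The migration code added later in this change uses the same filters to locate the destination share server that was spawned for a given source server.

# Illustrative queries against the new filter-aware DB API entry points.
from manila.common import constants
from manila import context as manila_context
from manila.db import api as db_api

ctxt = manila_context.get_admin_context()

# Destination server created for a source server during migration.
dest_servers = db_api.share_server_get_all_with_filters(
    ctxt, {'source_share_server_id': '<source-server-uuid>',
           'status': constants.STATUS_SERVER_MIGRATING_TO})

# share_server_get_all_by_host() now accepts the same filters and simply adds
# the 'host' key before delegating to share_server_get_all_with_filters().
active_on_host = db_api.share_server_get_all_by_host(
    ctxt, 'hostA@backend1', filters={'status': constants.STATUS_ACTIVE})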
@@ -264,8 +264,10 @@ class Share(BASE, ManilaBase):
         if len(self.instances) > 0:
             order = (constants.STATUS_REVERTING,
                      constants.STATUS_REPLICATION_CHANGE,
-                     constants.STATUS_MIGRATING, constants.STATUS_AVAILABLE,
-                     constants.STATUS_ERROR)
+                     constants.STATUS_MIGRATING,
+                     constants.STATUS_SERVER_MIGRATING,
+                     constants.STATUS_AVAILABLE,
+                     constants.STATUS_ERROR, )
             other_statuses = (
                 [x['status'] for x in self.instances if
                  x['status'] not in order and
@@ -671,8 +673,9 @@ class ShareSnapshot(BASE, ManilaBase):
             lambda x: qualified_replica(x.share_instance), self.instances))
 
         migrating_snapshots = list(filter(
-            lambda x: x.share_instance['status'] ==
-            constants.STATUS_MIGRATING, self.instances))
+            lambda x: x.share_instance['status'] in (
+                constants.STATUS_MIGRATING,
+                constants.STATUS_SERVER_MIGRATING), self.instances))
 
         snapshot_instances = (replica_snapshots or migrating_snapshots
                               or self.instances)
@@ -704,6 +707,7 @@ class ShareSnapshot(BASE, ManilaBase):
 
         order = (constants.STATUS_DELETING, constants.STATUS_CREATING,
                  constants.STATUS_ERROR, constants.STATUS_MIGRATING,
+                 constants.STATUS_SERVER_MIGRATING,
                  constants.STATUS_AVAILABLE)
         other_statuses = [x['status'] for x in self.instances if
                           x['status'] not in order]
@@ -1006,12 +1010,17 @@ class ShareServer(BASE, ManilaBase):
     host = Column(String(255), nullable=False)
     is_auto_deletable = Column(Boolean, default=True)
     identifier = Column(String(255), nullable=True)
+    task_state = Column(String(255), nullable=True)
+    source_share_server_id = Column(String(36), ForeignKey('share_servers.id'),
+                                    nullable=True)
     status = Column(Enum(
         constants.STATUS_INACTIVE, constants.STATUS_ACTIVE,
         constants.STATUS_ERROR, constants.STATUS_DELETING,
         constants.STATUS_CREATING, constants.STATUS_DELETED,
         constants.STATUS_MANAGING, constants.STATUS_UNMANAGING,
-        constants.STATUS_UNMANAGE_ERROR, constants.STATUS_MANAGE_ERROR),
+        constants.STATUS_UNMANAGE_ERROR, constants.STATUS_MANAGE_ERROR,
+        constants.STATUS_SERVER_MIGRATING,
+        constants.STATUS_SERVER_MIGRATING_TO),
         default=constants.STATUS_INACTIVE)
     network_allocations = orm.relationship(
         "NetworkAllocation",
@@ -246,8 +246,16 @@ class ShareServerInUse(InUse):
     message = _("Share server %(share_server_id)s is in use.")
 
 
+class ShareServerMigrationError(ManilaException):
+    message = _("Error in share server migration: %(reason)s")
+
+
+class ShareServerMigrationFailed(ManilaException):
+    message = _("Share server migration failed: %(reason)s")
+
+
 class InvalidShareServer(Invalid):
-    message = _("Share server %(share_server_id)s is not valid.")
+    message = _("Invalid share server: %(reason)s")
 
 
 class ShareMigrationError(ManilaException):
@@ -93,6 +93,68 @@ share_server_policies = [
                 'path': '/share-servers/{share_server_id}/action'
             }
         ]),
+    policy.DocumentedRuleDefault(
+        name=BASE_POLICY_NAME % 'share_server_migration_start',
+        check_str=base.RULE_ADMIN_API,
+        description="Migrates a share server to the specified host.",
+        operations=[
+            {
+                'method': 'POST',
+                'path': '/share-servers/{share_server_id}/action',
+            }
+        ]),
+    policy.DocumentedRuleDefault(
+        name=BASE_POLICY_NAME % 'share_server_migration_check',
+        check_str=base.RULE_ADMIN_API,
+        description="Check if can migrates a share server to the specified "
+                    "host.",
+        operations=[
+            {
+                'method': 'POST',
+                'path': '/share-servers/{share_server_id}/action',
+            }
+        ]),
+    policy.DocumentedRuleDefault(
+        name=BASE_POLICY_NAME % 'share_server_migration_complete',
+        check_str=base.RULE_ADMIN_API,
+        description="Invokes the 2nd phase of share server migration.",
+        operations=[
+            {
+                'method': 'POST',
+                'path': '/share-servers/{share_server_id}/action',
+            }
+        ]),
+    policy.DocumentedRuleDefault(
+        name=BASE_POLICY_NAME % 'share_server_migration_cancel',
+        check_str=base.RULE_ADMIN_API,
+        description="Attempts to cancel share server migration.",
+        operations=[
+            {
+                'method': 'POST',
+                'path': '/share-servers/{share_server_id}/action',
+            }
+        ]),
+    policy.DocumentedRuleDefault(
+        name=BASE_POLICY_NAME % 'share_server_migration_get_progress',
+        check_str=base.RULE_ADMIN_API,
+        description=("Retrieves the share server migration progress for a "
+                     "given share server."),
+        operations=[
+            {
+                'method': 'POST',
+                'path': '/share-servers/{share_server_id}/action',
+            }
+        ]),
+    policy.DocumentedRuleDefault(
+        name=BASE_POLICY_NAME % 'share_server_reset_task_state',
+        check_str=base.RULE_ADMIN_API,
+        description=("Resets task state."),
+        operations=[
+            {
+                'method': 'POST',
+                'path': '/share-servers/{share_server_id}/action',
+            }
+        ]),
 ]
 
 
@@ -805,8 +805,8 @@ class API(base.Base):
             raise exception.InvalidInput(reason=msg)
 
         if share_server['status'] != constants.STATUS_ACTIVE:
-            msg = _("Share Server specified is not active.")
-            raise exception.InvalidShareServer(message=msg)
+            msg = _("The provided share server is not active.")
+            raise exception.InvalidShareServer(reason=msg)
         subnet = self.db.share_network_subnet_get(
             context, share_server['share_network_subnet_id'])
         share_data['share_network_id'] = subnet['share_network_id']
@@ -850,7 +850,9 @@ class API(base.Base):
         if share is None:
             share = {'instance': {}}
 
-        share_instance = share['instance']
+        # NOTE(dviroel): The share object can be a share instance object with
+        # share data.
+        share_instance = share.get('instance', share)
 
         share_properties = {
             'size': kwargs.get('size', share.get('size')),
@@ -1610,6 +1612,7 @@ class API(base.Base):
         if task_state in (constants.TASK_STATE_MIGRATION_SUCCESS,
                           constants.TASK_STATE_DATA_COPYING_ERROR,
                           constants.TASK_STATE_MIGRATION_CANCELLED,
+                          constants.TASK_STATE_MIGRATION_CANCEL_IN_PROGRESS,
                           constants.TASK_STATE_MIGRATION_COMPLETING,
                           constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
                           constants.TASK_STATE_DATA_COPYING_COMPLETED,
@@ -1625,22 +1628,30 @@ class API(base.Base):
         else:
             return None
 
-    def _migration_validate_error_message(self, share):
-        task_state = share['task_state']
+    def _migration_validate_error_message(self, resource,
+                                          resource_type='share'):
+        task_state = resource['task_state']
         if task_state == constants.TASK_STATE_MIGRATION_SUCCESS:
-            msg = _("Migration of share %s has already "
-                    "completed.") % share['id']
+            msg = _("Migration of %(resource_type)s %(resource_id)s has "
+                    "already completed.") % {
+                'resource_id': resource['id'],
+                'resource_type': resource_type}
         elif task_state in (None, constants.TASK_STATE_MIGRATION_ERROR):
-            msg = _("There is no migration being performed for share %s "
-                    "at this moment.") % share['id']
+            msg = _("There is no migration being performed for "
+                    "%(resource_type)s %(resource_id)s at this moment.") % {
+                'resource_id': resource['id'],
+                'resource_type': resource_type}
        elif task_state == constants.TASK_STATE_MIGRATION_CANCELLED:
-            msg = _("Migration of share %s was already "
-                    "cancelled.") % share['id']
+            msg = _("Migration of %(resource_type)s %(resource_id)s was "
+                    "already cancelled.") % {
+                'resource_id': resource['id'],
+                'resource_type': resource_type}
         elif task_state in (constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
                             constants.TASK_STATE_DATA_COPYING_COMPLETED):
-            msg = _("Migration of share %s has already completed first "
-                    "phase.") % share['id']
+            msg = _("Migration of %(resource_type)s %(resource_id)s has "
+                    "already completed first phase.") % {
+                'resource_id': resource['id'],
+                'resource_type': resource_type}
         else:
             return None
         return msg
@@ -2234,3 +2245,401 @@ class API(base.Base):
     def snapshot_export_location_get(self, context, el_id):
         return self.db.share_snapshot_instance_export_location_get(context,
                                                                     el_id)
+
+    def share_server_migration_get_destination(self, context, source_server_id,
+                                               status=None):
+        filters = {'source_share_server_id': source_server_id}
+        if status:
+            filters.update({'status': status})
+
+        dest_share_servers = self.db.share_server_get_all_with_filters(
+            context, filters=filters)
+        if not dest_share_servers:
+            msg = _("A destination share server wasn't found for source "
+                    "share server %s.") % source_server_id
+            raise exception.InvalidShareServer(reason=msg)
+        if len(dest_share_servers) > 1:
+            msg = _("More than one destination share server was found for "
+                    "source share server %s. Aborting...") % source_server_id
+            raise exception.InvalidShareServer(reason=msg)
+
+        return dest_share_servers[0]
+
+    def get_share_server_migration_request_spec_dict(
+            self, context, share_instances, snapshot_instances, **kwargs):
+        """Returns request specs related to share server and all its shares."""
+
+        shares_total_size = sum([instance.get('size', 0)
+                                 for instance in share_instances])
+        snapshots_total_size = sum([instance.get('size', 0)
+                                    for instance in snapshot_instances])
+
+        shares_req_spec = []
+        for share_instance in share_instances:
+            share_type_id = share_instance['share_type_id']
+            share_type = share_types.get_share_type(context, share_type_id)
+            req_spec = self._get_request_spec_dict(share_instance,
+                                                   share_type,
+                                                   **kwargs)
+            shares_req_spec.append(req_spec)
+
+        server_request_spec = {
+            'shares_size': shares_total_size,
+            'snapshots_size': snapshots_total_size,
+            'shares_req_spec': shares_req_spec,
+        }
+        return server_request_spec
+
+    def _migration_initial_checks(self, context, share_server, dest_host,
+                                  new_share_network):
+        shares = self.db.share_get_all_by_share_server(
+            context, share_server['id'])
+
+        if len(shares) == 0:
+            msg = _("Share server %s does not have shares."
+                    % share_server['id'])
+            raise exception.InvalidShareServer(reason=msg)
+
+        # We only handle "active" share servers for now
+        if share_server['status'] != constants.STATUS_ACTIVE:
+            msg = _('Share server %(server_id)s status must be active, '
+                    'but current status is: %(server_status)s.') % {
+                'server_id': share_server['id'],
+                'server_status': share_server['status']}
+            raise exception.InvalidShareServer(reason=msg)
+
+        share_groups_related_to_share_server = (
+            self.db.share_group_get_all_by_share_server(
+                context, share_server['id']))
+
+        if share_groups_related_to_share_server:
+            msg = _("The share server %s can not be migrated because it is "
+                    "related to a share group.") % share_server['id']
+            raise exception.InvalidShareServer(reason=msg)
+
+        # Same backend and same network, nothing changes
+        src_backend = share_utils.extract_host(share_server['host'],
+                                               level='backend_name')
+        dest_backend = share_utils.extract_host(dest_host,
+                                                level='backend_name')
+        current_share_network_id = shares[0]['instance']['share_network_id']
+        if (src_backend == dest_backend and
+                (new_share_network is None or
+                 new_share_network['id'] == current_share_network_id)):
+            msg = _('There is no difference between source and destination '
+                    'backends and between source and destination share '
+                    'networks. Share server migration will not proceed.')
+            raise exception.InvalidShareServer(reason=msg)
+
+        filters = {'source_share_server_id': share_server['id'],
+                   'status': constants.STATUS_SERVER_MIGRATING_TO}
+        dest_share_servers = self.db.share_server_get_all_with_filters(
+            context, filters=filters)
+        if len(dest_share_servers):
+            msg = _("There is at least one destination share server pointing "
+                    "to this source share server. Clean up your environment "
+                    "before starting a new migration.")
+            raise exception.InvalidShareServer(reason=msg)
+
+        dest_service_host = share_utils.extract_host(dest_host)
+        # Make sure the host is in the list of available hosts
+        utils.validate_service_host(context, dest_service_host)
+
+        service = self.db.service_get_by_args(
+            context, dest_service_host, 'manila-share')
+
+        # Get all share types
+        type_ids = set([share['instance']['share_type_id']
+                        for share in shares])
+        types = [share_types.get_share_type(context, type_id)
+                 for type_id in type_ids]
+
+        # Check if share type azs are supported by the destination host
+        for share_type in types:
+            azs = share_type['extra_specs'].get('availability_zones', '')
+            if azs and service['availability_zone']['name'] not in azs:
+                msg = _("Share server %(server)s cannot be migrated to host "
+                        "%(dest)s because the share type %(type)s is used by "
+                        "one of the shares, and this share type is not "
+                        "supported within the availability zone (%(az)s) that "
+                        "the host is in.")
+                type_name = '%s' % (share_type['name'] or '')
+                type_id = '(ID: %s)' % share_type['id']
+                payload = {'type': '%s%s' % (type_name, type_id),
+                           'az': service['availability_zone']['name'],
+                           'server': share_server['id'],
+                           'dest': dest_host}
|
||||||
|
raise exception.InvalidShareServer(reason=msg % payload)
|
||||||
|
|
||||||
|
if new_share_network:
|
||||||
|
new_share_network_id = new_share_network['id']
|
||||||
|
else:
|
||||||
|
new_share_network_id = shares[0]['instance']['share_network_id']
|
||||||
|
# NOTE(carloss): check if the new or old share network has a subnet
|
||||||
|
# that spans the availability zone of the destination host, otherwise
|
||||||
|
# we should deny this operation.
|
||||||
|
dest_az = self.db.availability_zone_get(
|
||||||
|
context, service['availability_zone']['name'])
|
||||||
|
compatible_subnet = (
|
||||||
|
self.db.share_network_subnet_get_by_availability_zone_id(
|
||||||
|
context, new_share_network_id, dest_az['id']))
|
||||||
|
|
||||||
|
if not compatible_subnet:
|
||||||
|
msg = _("The share network %(network)s does not have a subnet "
|
||||||
|
"that spans the destination host availability zone.")
|
||||||
|
payload = {'network': new_share_network_id}
|
||||||
|
raise exception.InvalidShareServer(reason=msg % payload)
|
||||||
|
|
||||||
|
# NOTE(carloss): Refreshing the list of shares since something could've
|
||||||
|
# changed from the initial list.
|
||||||
|
shares = self.db.share_get_all_by_share_server(
|
||||||
|
context, share_server['id'])
|
||||||
|
for share in shares:
|
||||||
|
if share['status'] != constants.STATUS_AVAILABLE:
|
||||||
|
msg = _('Share %(share_id)s status must be available, '
|
||||||
|
'but current status is: %(share_status)s.') % {
|
||||||
|
'share_id': share['id'],
|
||||||
|
'share_status': share['status']}
|
||||||
|
raise exception.InvalidShareServer(reason=msg)
|
||||||
|
|
||||||
|
if share.has_replicas:
|
||||||
|
msg = _('Share %s has replicas. Remove the replicas of all '
|
||||||
|
'shares in the share server before attempting to '
|
||||||
|
'migrate it.') % share['id']
|
||||||
|
LOG.error(msg)
|
||||||
|
raise exception.InvalidShareServer(reason=msg)
|
||||||
|
|
||||||
|
# NOTE(carloss): Not validating the flag preserve_snapshots at this
|
||||||
|
# point, considering that even if the admin set the value to False,
|
||||||
|
# the driver can still support preserving snapshots and the
|
||||||
|
# snapshots would be copied anyway. So the share/manager will be
|
||||||
|
# responsible for checking if the driver does not support snapshot
|
||||||
|
# preservation, and if there are snapshots in the share server.
|
||||||
|
share_snapshots = self.db.share_snapshot_get_all_for_share(
|
||||||
|
context, share['id'])
|
||||||
|
all_snapshots_are_available = all(
|
||||||
|
[snapshot['status'] == constants.STATUS_AVAILABLE
|
||||||
|
for snapshot in share_snapshots])
|
||||||
|
if not all_snapshots_are_available:
|
||||||
|
msg = _(
|
||||||
|
"All snapshots must have '%(status)s' status to be "
|
||||||
|
"migrated by the driver along with share "
|
||||||
|
"%(resource_id)s.") % {
|
||||||
|
'resource_id': share['id'],
|
||||||
|
'status': constants.STATUS_AVAILABLE,
|
||||||
|
}
|
||||||
|
LOG.error(msg)
|
||||||
|
raise exception.InvalidShareServer(reason=msg)
|
||||||
|
|
||||||
|
if share.get('share_group_id'):
|
||||||
|
msg = _('Share %s is a member of a group. This operation is '
|
||||||
|
'not currently supported for share servers that '
|
||||||
|
'contain shares members of groups.') % share['id']
|
||||||
|
LOG.error(msg)
|
||||||
|
raise exception.InvalidShareServer(reason=msg)
|
||||||
|
|
||||||
|
share_instance = share['instance']
|
||||||
|
# Access rules status must not be error
|
||||||
|
if share_instance['access_rules_status'] == constants.STATUS_ERROR:
|
||||||
|
msg = _(
|
||||||
|
'Share instance %(instance_id)s access rules status must '
|
||||||
|
'not be in %(error)s when attempting to start a share '
|
||||||
|
'server migration.') % {
|
||||||
|
'instance_id': share_instance['id'],
|
||||||
|
'error': constants.STATUS_ERROR}
|
||||||
|
raise exception.InvalidShareServer(reason=msg)
|
||||||
|
try:
|
||||||
|
self._check_is_share_busy(share)
|
||||||
|
except exception.ShareBusyException as e:
|
||||||
|
raise exception.InvalidShareServer(reason=e.msg)
|
||||||
|
|
||||||
|
return shares, types, service, new_share_network_id
|
||||||
|
|
||||||
|
def share_server_migration_check(self, context, share_server, dest_host,
|
||||||
|
writable, nondisruptive,
|
||||||
|
preserve_snapshots,
|
||||||
|
new_share_network=None):
|
||||||
|
"""Migrates share server to a new host."""
|
||||||
|
shares, types, service, new_share_network_id = (
|
||||||
|
self._migration_initial_checks(context, share_server, dest_host,
|
||||||
|
new_share_network))
|
||||||
|
|
||||||
|
# NOTE(dviroel): Service is up according to validations made on initial
|
||||||
|
# checks
|
||||||
|
result = self.share_rpcapi.share_server_migration_check(
|
||||||
|
context, share_server['id'], dest_host, writable, nondisruptive,
|
||||||
|
preserve_snapshots, new_share_network_id)
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
def share_server_migration_start(
|
||||||
|
self, context, share_server, dest_host, writable, nondisruptive,
|
||||||
|
preserve_snapshots, new_share_network=None):
|
||||||
|
"""Migrates share server to a new host."""
|
||||||
|
|
||||||
|
shares, types, dest_service, new_share_network_id = (
|
||||||
|
self._migration_initial_checks(context, share_server,
|
||||||
|
dest_host,
|
||||||
|
new_share_network))
|
||||||
|
|
||||||
|
# Updates the share server status to migration starting
|
||||||
|
self.db.share_server_update(
|
||||||
|
context, share_server['id'],
|
||||||
|
{'task_state': constants.TASK_STATE_MIGRATION_STARTING,
|
||||||
|
'status': constants.STATUS_SERVER_MIGRATING})
|
||||||
|
|
||||||
|
share_snapshots = [
|
||||||
|
self.db.share_snapshot_get_all_for_share(context, share['id'])
|
||||||
|
for share in shares]
|
||||||
|
snapshot_instance_ids = []
|
||||||
|
for snapshot_list in share_snapshots:
|
||||||
|
for snapshot in snapshot_list:
|
||||||
|
snapshot_instance_ids.append(snapshot['instance']['id'])
|
||||||
|
share_instance_ids = [share['instance']['id'] for share in shares]
|
||||||
|
|
||||||
|
# Updates all shares and snapshot instances
|
||||||
|
self.db.share_and_snapshot_instances_status_update(
|
||||||
|
context, {'status': constants.STATUS_SERVER_MIGRATING},
|
||||||
|
share_instance_ids=share_instance_ids,
|
||||||
|
snapshot_instance_ids=snapshot_instance_ids,
|
||||||
|
current_expected_status=constants.STATUS_AVAILABLE
|
||||||
|
)
|
||||||
|
|
||||||
|
# NOTE(dviroel): Service is up according to validations made on initial
|
||||||
|
# checks
|
||||||
|
self.share_rpcapi.share_server_migration_start(
|
||||||
|
context, share_server, dest_host, writable, nondisruptive,
|
||||||
|
preserve_snapshots, new_share_network_id)
|
||||||
|
|
||||||
|
def share_server_migration_complete(self, context, share_server):
|
||||||
|
"""Invokes 2nd phase of share server migration."""
|
||||||
|
if share_server['status'] != constants.STATUS_SERVER_MIGRATING:
|
||||||
|
msg = _("Share server %s is not migrating") % share_server['id']
|
||||||
|
LOG.error(msg)
|
||||||
|
raise exception.InvalidShareServer(reason=msg)
|
||||||
|
if (share_server['task_state'] !=
|
||||||
|
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE):
|
||||||
|
msg = _("The first phase of migration has to finish to "
|
||||||
|
"request the completion of server %s's "
|
||||||
|
"migration.") % share_server['id']
|
||||||
|
LOG.error(msg)
|
||||||
|
raise exception.InvalidShareServer(reason=msg)
|
||||||
|
|
||||||
|
dest_share_server = self.share_server_migration_get_destination(
|
||||||
|
context, share_server['id'],
|
||||||
|
status=constants.STATUS_SERVER_MIGRATING_TO
|
||||||
|
)
|
||||||
|
|
||||||
|
dest_host = share_utils.extract_host(dest_share_server['host'])
|
||||||
|
utils.validate_service_host(context, dest_host)
|
||||||
|
|
||||||
|
self.share_rpcapi.share_server_migration_complete(
|
||||||
|
context, dest_share_server['host'], share_server,
|
||||||
|
dest_share_server)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'destination_share_server_id': dest_share_server['id']
|
||||||
|
}
|
||||||
|
|
||||||
|
def share_server_migration_cancel(self, context, share_server):
|
||||||
|
"""Attempts to cancel share server migration."""
|
||||||
|
if share_server['status'] != constants.STATUS_SERVER_MIGRATING:
|
||||||
|
msg = _("Migration of share server %s cannot be cancelled because "
|
||||||
|
"the provided share server is not being migrated.")
|
||||||
|
LOG.error(msg)
|
||||||
|
raise exception.InvalidShareServer(reason=msg)
|
||||||
|
|
||||||
|
if share_server['task_state'] in (
|
||||||
|
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
|
||||||
|
constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
|
||||||
|
|
||||||
|
dest_share_server = self.share_server_migration_get_destination(
|
||||||
|
context, share_server['id'],
|
||||||
|
status=constants.STATUS_SERVER_MIGRATING_TO
|
||||||
|
)
|
||||||
|
|
||||||
|
dest_host = share_utils.extract_host(dest_share_server['host'])
|
||||||
|
utils.validate_service_host(context, dest_host)
|
||||||
|
|
||||||
|
self.share_rpcapi.share_server_migration_cancel(
|
||||||
|
context, dest_share_server['host'], share_server,
|
||||||
|
dest_share_server)
|
||||||
|
else:
|
||||||
|
msg = self._migration_validate_error_message(
|
||||||
|
share_server, resource_type='share_server')
|
||||||
|
if msg is None:
|
||||||
|
msg = _("Migration of share server %s can be cancelled only "
|
||||||
|
"after the driver already started the migration, or "
|
||||||
|
"when the first phase of the migration gets "
|
||||||
|
"completed.") % share_server['id']
|
||||||
|
LOG.error(msg)
|
||||||
|
raise exception.InvalidShareServer(reason=msg)
|
||||||
|
|
||||||
|
def share_server_migration_get_progress(self, context,
|
||||||
|
src_share_server_id):
|
||||||
|
"""Retrieve migration progress for a given share server."""
|
||||||
|
try:
|
||||||
|
share_server = self.db.share_server_get(context,
|
||||||
|
src_share_server_id)
|
||||||
|
except exception.ShareServerNotFound:
|
||||||
|
msg = _('Share server %s was not found. We will search for a '
|
||||||
|
'successful migration') % src_share_server_id
|
||||||
|
LOG.debug(msg)
|
||||||
|
# Search for a successful migration, raise an error if not found
|
||||||
|
dest_share_server = self.share_server_migration_get_destination(
|
||||||
|
context, src_share_server_id,
|
||||||
|
status=constants.STATUS_ACTIVE
|
||||||
|
)
|
||||||
|
return {
|
||||||
|
'total_progress': 100,
|
||||||
|
'destination_share_server_id': dest_share_server['id'],
|
||||||
|
'task_state': dest_share_server['task_state'],
|
||||||
|
}
|
||||||
|
# Source server still exists so it must be in 'server_migrating' status
|
||||||
|
if (share_server and
|
||||||
|
share_server['status'] != constants.STATUS_SERVER_MIGRATING):
|
||||||
|
msg = _("Migration progress of share server %s cannot be "
|
||||||
|
"obtained. The provided share server is not being "
|
||||||
|
"migrated.") % share_server['id']
|
||||||
|
LOG.error(msg)
|
||||||
|
raise exception.InvalidShareServer(reason=msg)
|
||||||
|
|
||||||
|
dest_share_server = self.share_server_migration_get_destination(
|
||||||
|
context, share_server['id'],
|
||||||
|
status=constants.STATUS_SERVER_MIGRATING_TO
|
||||||
|
)
|
||||||
|
|
||||||
|
if (share_server['task_state'] ==
|
||||||
|
constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
|
||||||
|
|
||||||
|
dest_host = share_utils.extract_host(dest_share_server['host'])
|
||||||
|
utils.validate_service_host(context, dest_host)
|
||||||
|
|
||||||
|
try:
|
||||||
|
result = (
|
||||||
|
self.share_rpcapi.share_server_migration_get_progress(
|
||||||
|
context, dest_share_server['host'],
|
||||||
|
share_server, dest_share_server))
|
||||||
|
except Exception:
|
||||||
|
msg = _("Failed to obtain migration progress of share "
|
||||||
|
"server %s.") % share_server['id']
|
||||||
|
LOG.exception(msg)
|
||||||
|
raise exception.ShareServerMigrationError(reason=msg)
|
||||||
|
|
||||||
|
else:
|
||||||
|
result = self._migration_get_progress_state(share_server)
|
||||||
|
|
||||||
|
if not (result and result.get('total_progress') is not None):
|
||||||
|
msg = self._migration_validate_error_message(
|
||||||
|
share_server, resource_type='share_server')
|
||||||
|
if msg is None:
|
||||||
|
msg = _("Migration progress of share server %s cannot be "
|
||||||
|
"obtained at this moment.") % share_server['id']
|
||||||
|
LOG.error(msg)
|
||||||
|
raise exception.InvalidShareServer(reason=msg)
|
||||||
|
|
||||||
|
result.update({
|
||||||
|
'destination_share_server_id': dest_share_server['id'],
|
||||||
|
'task_state': dest_share_server['task_state']
|
||||||
|
})
|
||||||
|
return result
|
||||||
|
@ -2861,3 +2861,298 @@ class ShareDriver(object):
|
|||||||
to 'error'.
|
to 'error'.
|
||||||
"""
|
"""
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
def share_server_migration_start(self, context, src_share_server,
|
||||||
|
dest_share_server, shares, snapshots):
|
||||||
|
"""Starts migration of a given share server to another host.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
Is called in destination share server's backend to start migration.
|
||||||
|
|
||||||
|
Driver should implement this method if willing to perform a server
|
||||||
|
migration in driver-assisted way, useful when source share server's
|
||||||
|
backend driver is compatible with destination backend driver. This
|
||||||
|
method should start the migration procedure in the backend and return
|
||||||
|
immediately.
|
||||||
|
Following steps should be done in 'share_server_migration_continue'.
|
||||||
|
|
||||||
|
:param context: The 'context.RequestContext' object for the request.
|
||||||
|
:param src_share_server: Reference to the original share server.
|
||||||
|
:param dest_share_server: Reference to the share server to be used by
|
||||||
|
as destination.
|
||||||
|
:param shares: All shares in the source share server that should be
|
||||||
|
migrated.
|
||||||
|
:param snapshots: All snapshots in the source share server that should
|
||||||
|
be migrated.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
def share_server_migration_continue(self, context, src_share_server,
|
||||||
|
dest_share_server, shares, snapshots):
|
||||||
|
"""Continues migration of a given share server to another host.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
Is called in destination share server's backend to continue
|
||||||
|
migration.
|
||||||
|
|
||||||
|
Driver should implement this method to continue monitor the migration
|
||||||
|
progress in storage and perform following steps until 1st phase is
|
||||||
|
completed.
|
||||||
|
|
||||||
|
:param context: The 'context.RequestContext' object for the request.
|
||||||
|
:param src_share_server: Reference to the original share server.
|
||||||
|
:param dest_share_server: Reference to the share server to be used as
|
||||||
|
destination.
|
||||||
|
:param shares: All shares in the source share server that should be
|
||||||
|
migrated.
|
||||||
|
:param snapshots: All snapshots in the source share server that should
|
||||||
|
be migrated.
|
||||||
|
:return: Boolean value to indicate if 1st phase is finished.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
def share_server_migration_get_progress(self, context, src_share_server,
|
||||||
|
dest_share_server, shares,
|
||||||
|
snapshots):
|
||||||
|
"""Obtains progress of migration of a share server to another host.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
Is called in destination share's backend to obtain migration
|
||||||
|
progress.
|
||||||
|
|
||||||
|
If possible, driver can implement a way to return migration progress
|
||||||
|
information.
|
||||||
|
|
||||||
|
:param context: The 'context.RequestContext' object for the request.
|
||||||
|
:param src_share_server: Reference to the original share server.
|
||||||
|
:param dest_share_server: Reference to the share server to be used as
|
||||||
|
destination.
|
||||||
|
:param shares: All shares in the source share server that should be
|
||||||
|
migrated.
|
||||||
|
:param snapshots: All snapshots in the source share server that should
|
||||||
|
be migrated.
|
||||||
|
:return: A dictionary with at least 'total_progress' field containing
|
||||||
|
the percentage value.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
def share_server_migration_cancel(self, context, src_share_server,
|
||||||
|
dest_share_server, shares, snapshots):
|
||||||
|
"""Cancels migration of a given share server to another host.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
Is called in destination share server's backend to continue
|
||||||
|
migration.
|
||||||
|
|
||||||
|
If possible, driver can implement a way to cancel an in-progress
|
||||||
|
migration.
|
||||||
|
|
||||||
|
:param context: The 'context.RequestContext' object for the request.
|
||||||
|
:param src_share_server: Reference to the original share server.
|
||||||
|
:param dest_share_server: Reference to the share server to be used as
|
||||||
|
destination.
|
||||||
|
:param shares: All shares in the source share server that should be
|
||||||
|
migrated.
|
||||||
|
:param snapshots: All snapshots in the source share server that should
|
||||||
|
be migrated.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
def share_server_migration_check_compatibility(
|
||||||
|
self, context, share_server, dest_host, old_share_network,
|
||||||
|
new_share_network, shares_request_spec):
|
||||||
|
"""Checks destination compatibility for migration of a share server.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
Is called in destination share server's backend to continue
|
||||||
|
migration. Can be called by an admin to check if a given host is
|
||||||
|
compatible or by the share manager to test compatibility with
|
||||||
|
destination backend.
|
||||||
|
|
||||||
|
Driver should check if it is compatible with destination backend so
|
||||||
|
driver-assisted migration can proceed.
|
||||||
|
|
||||||
|
:param context: The 'context.RequestContext' object for the request.
|
||||||
|
:param share_server: Share server model.
|
||||||
|
:param dest_host: Reference to the hos to be used by the migrated
|
||||||
|
share server.
|
||||||
|
:param old_share_network: Share network model where the source share
|
||||||
|
server is placed.
|
||||||
|
:param new_share_network: Share network model where the share
|
||||||
|
server is going to be migrated to.
|
||||||
|
:param shares_request_spec: Dict. Contains information about all shares
|
||||||
|
and share types that belong to the source share server. The drivers
|
||||||
|
can use this information to check if the capabilities match with
|
||||||
|
the destination backend and if there is available space to hold the
|
||||||
|
new share server and all its resource.
|
||||||
|
|
||||||
|
Example::
|
||||||
|
|
||||||
|
{
|
||||||
|
'shares_size': 100,
|
||||||
|
'snapshots_size': 100,
|
||||||
|
'shares_req_spec':
|
||||||
|
[
|
||||||
|
{
|
||||||
|
'share_properties':
|
||||||
|
{
|
||||||
|
'size': 10
|
||||||
|
'user_id': '2f5c1df4-5203-444e-b68e-1e60f3f26fc3'
|
||||||
|
'project_id': '0b82b278-51d6-4357-b273-0d7263982c31'
|
||||||
|
'snapshot_support': True
|
||||||
|
'create_share_from_snapshot_support': True
|
||||||
|
'revert_to_snapshot_support': False
|
||||||
|
'mount_snapshot_support': False
|
||||||
|
'share_proto': NFS
|
||||||
|
'share_type_id': '360e01c1-a4f7-4782-9676-dc013f1a2f21'
|
||||||
|
'is_public': False
|
||||||
|
'share_group_id': None
|
||||||
|
'source_share_group_snapshot_member_id': None
|
||||||
|
'snapshot_id': None
|
||||||
|
},
|
||||||
|
'share_instance_properties':
|
||||||
|
{
|
||||||
|
'availability_zone_id':
|
||||||
|
'02377ad7-381c-4b25-a04c-6fd218f22a91',
|
||||||
|
'share_network_id': '691544aa-da83-4669-8522-22719f236e16',
|
||||||
|
'share_server_id': 'cd658413-d02c-4d1b-ac8a-b6b972e76bac',
|
||||||
|
'share_id': 'e42fec45-781e-4dcc-a4d2-44354ad5ae91',
|
||||||
|
'host': 'hostA@backend1#pool0',
|
||||||
|
'status': 'available',
|
||||||
|
},
|
||||||
|
'share_type':
|
||||||
|
{
|
||||||
|
'id': '360e01c1-a4f7-4782-9676-dc013f1a2f21',
|
||||||
|
'name': 'dhss_false',
|
||||||
|
'is_public': False,
|
||||||
|
'extra_specs':
|
||||||
|
{
|
||||||
|
'driver_handles_share_servers': False,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
'share_id': e42fec45-781e-4dcc-a4d2-44354ad5ae91,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
}
|
||||||
|
|
||||||
|
:return: A dictionary containing values indicating if destination
|
||||||
|
backend is compatible, if share can remain writable during
|
||||||
|
migration, if it can preserve all file metadata and if it can
|
||||||
|
perform migration of given share non-disruptively.
|
||||||
|
|
||||||
|
Example::
|
||||||
|
|
||||||
|
{
|
||||||
|
'compatible': True,
|
||||||
|
'writable': True,
|
||||||
|
'nondisruptive': True,
|
||||||
|
'preserve_snapshots': True,
|
||||||
|
'migration_cancel': True,
|
||||||
|
'migration_get_progress': False
|
||||||
|
}
|
||||||
|
|
||||||
|
"""
|
||||||
|
return {
|
||||||
|
'compatible': False,
|
||||||
|
'writable': False,
|
||||||
|
'nondisruptive': False,
|
||||||
|
'preserve_snapshots': False,
|
||||||
|
'migration_cancel': False,
|
||||||
|
'migration_get_progress': False
|
||||||
|
}
|
||||||
|
|
||||||
|
def share_server_migration_complete(self, context, src_share_server,
|
||||||
|
dest_share_server, shares, snapshots,
|
||||||
|
new_network_info):
|
||||||
|
"""Completes migration of a given share server to another host.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
Is called in destination share server's backend to complete
|
||||||
|
migration.
|
||||||
|
|
||||||
|
If driver is implementing 2-phase migration, this method should
|
||||||
|
perform the disruptive tasks related to the 2nd phase of migration,
|
||||||
|
thus completing it. Driver should also delete all original data from
|
||||||
|
source backend.
|
||||||
|
|
||||||
|
It expected that all shares and snapshots will be available at the
|
||||||
|
destination share server in the end of the migration complete and all
|
||||||
|
updates provided in the returned model update.
|
||||||
|
|
||||||
|
:param context: The 'context.RequestContext' object for the request.
|
||||||
|
:param src_share_server: Reference to the original share server.
|
||||||
|
:param dest_share_server: Reference to the share server to be used as
|
||||||
|
destination.
|
||||||
|
:param shares: All shares in the source share server that should be
|
||||||
|
migrated.
|
||||||
|
:param snapshots: All snapshots in the source share server that should
|
||||||
|
be migrated.
|
||||||
|
:param new_network_info: Network allocation associated to the
|
||||||
|
destination share server.
|
||||||
|
:return: If the migration changes the shares export locations,
|
||||||
|
snapshots provider locations or snapshots export locations, this
|
||||||
|
method should return a dictionary containing a list of share
|
||||||
|
instances and snapshot instances indexed by their id's, where each
|
||||||
|
instance should provide a dict with the relevant information that
|
||||||
|
need to be updated.
|
||||||
|
|
||||||
|
Example::
|
||||||
|
|
||||||
|
{
|
||||||
|
'share_updates':
|
||||||
|
{
|
||||||
|
'4363eb92-23ca-4888-9e24-502387816e2a':
|
||||||
|
{
|
||||||
|
'export_locations':
|
||||||
|
[
|
||||||
|
{
|
||||||
|
'path': '1.2.3.4:/foo',
|
||||||
|
'metadata': {},
|
||||||
|
'is_admin_only': False
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'path': '5.6.7.8:/foo',
|
||||||
|
'metadata': {},
|
||||||
|
'is_admin_only': True
|
||||||
|
},
|
||||||
|
],
|
||||||
|
'pool_name': 'poolA',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
'snapshot_updates':
|
||||||
|
{
|
||||||
|
'bc4e3b28-0832-4168-b688-67fdc3e9d408':
|
||||||
|
{
|
||||||
|
'provider_location': '/snapshots/foo/bar_1',
|
||||||
|
'export_locations':
|
||||||
|
[
|
||||||
|
{
|
||||||
|
'path': '1.2.3.4:/snapshots/foo/bar_1',
|
||||||
|
'is_admin_only': False,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'path': '5.6.7.8:/snapshots/foo/bar_1',
|
||||||
|
'is_admin_only': True,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
'2e62b7ea-4e30-445f-bc05-fd523ca62941':
|
||||||
|
{
|
||||||
|
'provider_location': '/snapshots/foo/bar_2',
|
||||||
|
'export_locations':
|
||||||
|
[
|
||||||
|
{
|
||||||
|
'path': '1.2.3.4:/snapshots/foo/bar_2',
|
||||||
|
'is_admin_only': False,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'path': '5.6.7.8:/snapshots/foo/bar_2',
|
||||||
|
'is_admin_only': True,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
File diff suppressed because it is too large
Load Diff
@ -23,6 +23,7 @@ from manila.common import constants
|
|||||||
from manila import exception
|
from manila import exception
|
||||||
from manila.i18n import _
|
from manila.i18n import _
|
||||||
from manila.share import api as share_api
|
from manila.share import api as share_api
|
||||||
|
from manila.share import rpcapi as share_rpcapi
|
||||||
import manila.utils as utils
|
import manila.utils as utils
|
||||||
|
|
||||||
|
|
||||||
@ -47,10 +48,9 @@ CONF.register_opts(migration_opts)
|
|||||||
|
|
||||||
class ShareMigrationHelper(object):
|
class ShareMigrationHelper(object):
|
||||||
|
|
||||||
def __init__(self, context, db, share, access_helper):
|
def __init__(self, context, db, access_helper):
|
||||||
|
|
||||||
self.db = db
|
self.db = db
|
||||||
self.share = share
|
|
||||||
self.context = context
|
self.context = context
|
||||||
self.access_helper = access_helper
|
self.access_helper = access_helper
|
||||||
self.api = share_api.API()
|
self.api = share_api.API()
|
||||||
@ -132,36 +132,50 @@ class ShareMigrationHelper(object):
|
|||||||
try:
|
try:
|
||||||
self.delete_instance_and_wait(new_instance)
|
self.delete_instance_and_wait(new_instance)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.warning("Failed to cleanup new instance during generic"
|
LOG.warning("Failed to cleanup new instance during generic "
|
||||||
" migration for share %s.", self.share['id'])
|
"migration for share %s.", new_instance['share_id'])
|
||||||
|
|
||||||
def cleanup_access_rules(self, share_instance, share_server):
|
def cleanup_access_rules(self, share_instances, share_server,
|
||||||
|
dest_host=None):
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self.revert_access_rules(share_instance, share_server)
|
self.revert_access_rules(share_instances, share_server, dest_host)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.warning("Failed to cleanup access rules during generic"
|
LOG.warning("Failed to cleanup access rules during generic"
|
||||||
" migration for share %s.", self.share['id'])
|
" migration.")
|
||||||
|
|
||||||
def revert_access_rules(self, share_instance, share_server):
|
def revert_access_rules(self, share_instances, share_server,
|
||||||
|
dest_host=None):
|
||||||
|
shares_instance_ids = []
|
||||||
|
for share_instance in share_instances:
|
||||||
|
# Cast all rules to 'queued_to_apply' so that they can be
|
||||||
|
# re-applied.
|
||||||
|
shares_instance_ids.append(share_instance['id'])
|
||||||
|
updates = {'state': constants.ACCESS_STATE_QUEUED_TO_APPLY}
|
||||||
|
self.access_helper.get_and_update_share_instance_access_rules(
|
||||||
|
self.context, updates=updates,
|
||||||
|
share_instance_id=share_instance['id'])
|
||||||
|
|
||||||
# Cast all rules to 'queued_to_apply' so that they can be re-applied.
|
if dest_host:
|
||||||
updates = {'state': constants.ACCESS_STATE_QUEUED_TO_APPLY}
|
rpcapi = share_rpcapi.ShareAPI()
|
||||||
self.access_helper.get_and_update_share_instance_access_rules(
|
rpcapi.update_access_for_instances(self.context, dest_host,
|
||||||
self.context, updates=updates,
|
shares_instance_ids,
|
||||||
share_instance_id=share_instance['id'])
|
share_server)
|
||||||
|
else:
|
||||||
|
for share_instance in share_instances:
|
||||||
|
self.access_helper.update_access_rules(
|
||||||
|
self.context, share_instance['id'],
|
||||||
|
share_server=share_server)
|
||||||
|
|
||||||
self.access_helper.update_access_rules(
|
for share_instance in share_instances:
|
||||||
self.context, share_instance['id'], share_server=share_server)
|
utils.wait_for_access_update(
|
||||||
|
self.context, self.db, share_instance,
|
||||||
|
self.migration_wait_access_rules_timeout)
|
||||||
|
|
||||||
utils.wait_for_access_update(
|
def apply_new_access_rules(self, new_share_instance, share_id):
|
||||||
self.context, self.db, share_instance,
|
|
||||||
self.migration_wait_access_rules_timeout)
|
|
||||||
|
|
||||||
def apply_new_access_rules(self, new_share_instance):
|
|
||||||
|
|
||||||
rules = self.db.share_instance_access_copy(
|
rules = self.db.share_instance_access_copy(
|
||||||
self.context, self.share['id'], new_share_instance['id'])
|
self.context, share_id, new_share_instance['id'])
|
||||||
|
|
||||||
if rules:
|
if rules:
|
||||||
self.api.allow_access_to_instance(self.context, new_share_instance)
|
self.api.allow_access_to_instance(self.context, new_share_instance)
|
||||||
|
@ -77,6 +77,8 @@ class ShareAPI(object):
|
|||||||
1.18 - Remove unused "share_id" parameter from revert_to_snapshot()
|
1.18 - Remove unused "share_id" parameter from revert_to_snapshot()
|
||||||
1.19 - Add manage_share_server() and unmanage_share_server()
|
1.19 - Add manage_share_server() and unmanage_share_server()
|
||||||
1.20 - Add share_instance_id parameter for create_share_server() method
|
1.20 - Add share_instance_id parameter for create_share_server() method
|
||||||
|
1.21 - Add share_server_migration_start, share_server_migration_check()
|
||||||
|
and share_server_get_progress()
|
||||||
"""
|
"""
|
||||||
|
|
||||||
BASE_RPC_API_VERSION = '1.0'
|
BASE_RPC_API_VERSION = '1.0'
|
||||||
@ -85,7 +87,7 @@ class ShareAPI(object):
|
|||||||
super(ShareAPI, self).__init__()
|
super(ShareAPI, self).__init__()
|
||||||
target = messaging.Target(topic=CONF.share_topic,
|
target = messaging.Target(topic=CONF.share_topic,
|
||||||
version=self.BASE_RPC_API_VERSION)
|
version=self.BASE_RPC_API_VERSION)
|
||||||
self.client = rpc.get_client(target, version_cap='1.20')
|
self.client = rpc.get_client(target, version_cap='1.21')
|
||||||
|
|
||||||
def create_share_instance(self, context, share_instance, host,
|
def create_share_instance(self, context, share_instance, host,
|
||||||
request_spec, filter_properties,
|
request_spec, filter_properties,
|
||||||
@ -180,6 +182,64 @@ class ShareAPI(object):
|
|||||||
new_share_network_id=new_share_network_id,
|
new_share_network_id=new_share_network_id,
|
||||||
new_share_type_id=new_share_type_id)
|
new_share_type_id=new_share_type_id)
|
||||||
|
|
||||||
|
def share_server_migration_start(self, context, share_server, dest_host,
|
||||||
|
writable, nondisruptive,
|
||||||
|
preserve_snapshots, new_share_network_id):
|
||||||
|
host = utils.extract_host(dest_host)
|
||||||
|
call_context = self.client.prepare(server=host, version='1.21')
|
||||||
|
call_context.cast(
|
||||||
|
context,
|
||||||
|
'share_server_migration_start',
|
||||||
|
share_server_id=share_server['id'],
|
||||||
|
dest_host=dest_host,
|
||||||
|
writable=writable,
|
||||||
|
nondisruptive=nondisruptive,
|
||||||
|
preserve_snapshots=preserve_snapshots,
|
||||||
|
new_share_network_id=new_share_network_id)
|
||||||
|
|
||||||
|
def share_server_migration_check(self, context, share_server_id, dest_host,
|
||||||
|
writable, nondisruptive,
|
||||||
|
preserve_snapshots, new_share_network_id):
|
||||||
|
host = utils.extract_host(dest_host)
|
||||||
|
call_context = self.client.prepare(server=host, version='1.21')
|
||||||
|
return call_context.call(
|
||||||
|
context,
|
||||||
|
'share_server_migration_check',
|
||||||
|
share_server_id=share_server_id,
|
||||||
|
dest_host=dest_host,
|
||||||
|
writable=writable,
|
||||||
|
nondisruptive=nondisruptive,
|
||||||
|
preserve_snapshots=preserve_snapshots,
|
||||||
|
new_share_network_id=new_share_network_id)
|
||||||
|
|
||||||
|
def share_server_migration_cancel(self, context, dest_host, share_server,
|
||||||
|
dest_share_server):
|
||||||
|
host = utils.extract_host(dest_host)
|
||||||
|
call_context = self.client.prepare(server=host, version='1.21')
|
||||||
|
call_context.cast(
|
||||||
|
context,
|
||||||
|
'share_server_migration_cancel',
|
||||||
|
src_share_server_id=share_server['id'],
|
||||||
|
dest_share_server_id=dest_share_server['id'])
|
||||||
|
|
||||||
|
def share_server_migration_get_progress(self, context, dest_host,
|
||||||
|
share_server, dest_share_server):
|
||||||
|
host = utils.extract_host(dest_host)
|
||||||
|
call_context = self.client.prepare(server=host, version='1.21')
|
||||||
|
return call_context.call(context,
|
||||||
|
'share_server_migration_get_progress',
|
||||||
|
src_share_server_id=share_server['id'],
|
||||||
|
dest_share_server_id=dest_share_server['id'])
|
||||||
|
|
||||||
|
def share_server_migration_complete(self, context, dest_host,
|
||||||
|
share_server, dest_share_server):
|
||||||
|
host = utils.extract_host(dest_host)
|
||||||
|
call_context = self.client.prepare(server=host, version='1.21')
|
||||||
|
call_context.cast(context,
|
||||||
|
'share_server_migration_complete',
|
||||||
|
src_share_server_id=share_server['id'],
|
||||||
|
dest_share_server_id=dest_share_server['id'])
|
||||||
|
|
||||||
def connection_get_info(self, context, share_instance):
|
def connection_get_info(self, context, share_instance):
|
||||||
new_host = utils.extract_host(share_instance['host'])
|
new_host = utils.extract_host(share_instance['host'])
|
||||||
call_context = self.client.prepare(server=new_host, version='1.12')
|
call_context = self.client.prepare(server=new_host, version='1.12')
|
||||||
@ -234,6 +294,14 @@ class ShareAPI(object):
|
|||||||
call_context.cast(context, 'update_access',
|
call_context.cast(context, 'update_access',
|
||||||
share_instance_id=share_instance['id'])
|
share_instance_id=share_instance['id'])
|
||||||
|
|
||||||
|
def update_access_for_instances(self, context, dest_host,
|
||||||
|
share_instance_ids, share_server_id=None):
|
||||||
|
host = utils.extract_host(dest_host)
|
||||||
|
call_context = self.client.prepare(server=host, version='1.21')
|
||||||
|
call_context.cast(context, 'update_access_for_instances',
|
||||||
|
share_instance_ids=share_instance_ids,
|
||||||
|
share_server_id=share_server_id)
|
||||||
|
|
||||||
def publish_service_capabilities(self, context):
|
def publish_service_capabilities(self, context):
|
||||||
call_context = self.client.prepare(fanout=True, version='1.0')
|
call_context = self.client.prepare(fanout=True, version='1.0')
|
||||||
call_context.cast(context, 'publish_service_capabilities')
|
call_context.cast(context, 'publish_service_capabilities')
|
||||||
|
@ -174,8 +174,7 @@ class ShareManageTest(test.TestCase):
|
|||||||
body = get_fake_manage_body()
|
body = get_fake_manage_body()
|
||||||
self._setup_manage_mocks()
|
self._setup_manage_mocks()
|
||||||
error = mock.Mock(
|
error = mock.Mock(
|
||||||
side_effect=exception.InvalidShareServer(message="",
|
side_effect=exception.InvalidShareServer(reason="")
|
||||||
share_server_id="")
|
|
||||||
)
|
)
|
||||||
self.mock_object(share_api.API, 'manage', mock.Mock(side_effect=error))
|
self.mock_object(share_api.API, 'manage', mock.Mock(side_effect=error))
|
||||||
|
|
||||||
|
@ -40,6 +40,8 @@ fake_share_server_list = {
|
|||||||
'project_id': 'fake_project_id',
|
'project_id': 'fake_project_id',
|
||||||
'id': 'fake_server_id',
|
'id': 'fake_server_id',
|
||||||
'is_auto_deletable': False,
|
'is_auto_deletable': False,
|
||||||
|
'task_state': None,
|
||||||
|
'source_share_server_id': None,
|
||||||
'identifier': 'fake_id'
|
'identifier': 'fake_id'
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -52,6 +54,8 @@ fake_share_server_list = {
|
|||||||
'project_id': 'fake_project_id_2',
|
'project_id': 'fake_project_id_2',
|
||||||
'id': 'fake_server_id_2',
|
'id': 'fake_server_id_2',
|
||||||
'is_auto_deletable': True,
|
'is_auto_deletable': True,
|
||||||
|
'task_state': None,
|
||||||
|
'source_share_server_id': None,
|
||||||
'identifier': 'fake_id_2'
|
'identifier': 'fake_id_2'
|
||||||
},
|
},
|
||||||
]
|
]
|
||||||
@ -88,6 +92,8 @@ fake_share_server_get_result = {
|
|||||||
'fake_key_2': 'fake_value_2',
|
'fake_key_2': 'fake_value_2',
|
||||||
},
|
},
|
||||||
'is_auto_deletable': False,
|
'is_auto_deletable': False,
|
||||||
|
'task_state': None,
|
||||||
|
'source_share_server_id': None,
|
||||||
'identifier': 'fake_id'
|
'identifier': 'fake_id'
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -122,6 +128,8 @@ class FakeShareServer(object):
|
|||||||
self.project_id = 'fake_project_id'
|
self.project_id = 'fake_project_id'
|
||||||
self.identifier = kwargs.get('identifier', 'fake_id')
|
self.identifier = kwargs.get('identifier', 'fake_id')
|
||||||
self.is_auto_deletable = kwargs.get('is_auto_deletable', False)
|
self.is_auto_deletable = kwargs.get('is_auto_deletable', False)
|
||||||
|
self.task_state = kwargs.get('task_state')
|
||||||
|
self.source_share_server_id = kwargs.get('source_share_server_id')
|
||||||
self.backend_details = share_server_backend_details
|
self.backend_details = share_server_backend_details
|
||||||
|
|
||||||
def __getitem__(self, item):
|
def __getitem__(self, item):
|
||||||
@ -138,6 +146,7 @@ def fake_share_server_get_all():
|
|||||||
'share_network_id': 'fake_sn_id_2',
|
'share_network_id': 'fake_sn_id_2',
|
||||||
},
|
},
|
||||||
identifier='fake_id_2',
|
identifier='fake_id_2',
|
||||||
|
task_state=None,
|
||||||
is_auto_deletable=True,
|
is_auto_deletable=True,
|
||||||
status=constants.STATUS_ERROR)
|
status=constants.STATUS_ERROR)
|
||||||
]
|
]
|
||||||
|
@ -20,6 +20,7 @@ import webob
|
|||||||
|
|
||||||
from manila.api.v2 import share_servers
|
from manila.api.v2 import share_servers
|
||||||
from manila.common import constants
|
from manila.common import constants
|
||||||
|
from manila import context as ctx_api
|
||||||
from manila.db import api as db_api
|
from manila.db import api as db_api
|
||||||
from manila import exception
|
from manila import exception
|
||||||
from manila import policy
|
from manila import policy
|
||||||
@ -456,3 +457,612 @@ class ShareServerControllerTest(test.TestCase):
|
|||||||
mock_unmanage.assert_called_once_with(context, server, force=True)
|
mock_unmanage.assert_called_once_with(context, server, force=True)
|
||||||
policy.check_policy.assert_called_once_with(
|
policy.check_policy.assert_called_once_with(
|
||||||
context, self.resource_name, 'unmanage_share_server')
|
context, self.resource_name, 'unmanage_share_server')
|
||||||
|
|
||||||
|
def _get_server_migration_request(self, server_id):
|
||||||
|
req = fakes.HTTPRequest.blank(
|
||||||
|
'/share-servers/%s/action' % server_id,
|
||||||
|
use_admin_context=True, version='2.57')
|
||||||
|
req.method = 'POST'
|
||||||
|
req.headers['content-type'] = 'application/json'
|
||||||
|
req.api_version_request.experimental = True
|
||||||
|
return req
|
||||||
|
|
||||||
|
def test_share_server_migration_start(self):
|
||||||
|
server = db_utils.create_share_server(id='fake_server_id',
|
||||||
|
status=constants.STATUS_ACTIVE)
|
||||||
|
share_network = db_utils.create_share_network()
|
||||||
|
req = self._get_server_migration_request(server['id'])
|
||||||
|
context = req.environ['manila.context']
|
||||||
|
|
||||||
|
self.mock_object(db_api, 'share_network_get', mock.Mock(
|
||||||
|
return_value=share_network))
|
||||||
|
self.mock_object(db_api, 'share_server_get',
|
||||||
|
mock.Mock(return_value=server))
|
||||||
|
self.mock_object(share_api.API, 'share_server_migration_start')
|
||||||
|
|
||||||
|
body = {
|
||||||
|
'migration_start': {
|
||||||
|
'host': 'fake_host',
|
||||||
|
'preserve_snapshots': True,
|
||||||
|
'writable': True,
|
||||||
|
'nondisruptive': True,
|
||||||
|
'new_share_network_id': 'fake_net_id',
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
self.controller.share_server_migration_start(req, server['id'], body)
|
||||||
|
|
||||||
|
db_api.share_server_get.assert_called_once_with(
|
||||||
|
context, server['id'])
|
||||||
|
share_api.API.share_server_migration_start.assert_called_once_with(
|
||||||
|
context, server, 'fake_host', True, True, True,
|
||||||
|
new_share_network=share_network)
|
||||||
|
db_api.share_network_get.assert_called_once_with(
|
||||||
|
context, 'fake_net_id')
|
||||||
|
|
||||||
|
@ddt.data({'api_exception': exception.ServiceIsDown(service='fake_srv'),
|
||||||
|
'expected_exception': webob.exc.HTTPBadRequest},
|
||||||
|
{'api_exception': exception.InvalidShareServer(reason=""),
|
||||||
|
'expected_exception': webob.exc.HTTPConflict})
|
||||||
|
@ddt.unpack
|
||||||
|
def test_share_server_migration_start_conflict(self, api_exception,
|
||||||
|
expected_exception):
|
||||||
|
server = db_utils.create_share_server(
|
||||||
|
id='fake_server_id', status=constants.STATUS_ACTIVE)
|
||||||
|
req = self._get_server_migration_request(server['id'])
|
||||||
|
context = req.environ['manila.context']
|
||||||
|
body = {
|
||||||
|
'migration_start': {
|
||||||
|
'host': 'fake_host',
|
||||||
|
'preserve_snapshots': True,
|
||||||
|
'writable': True,
|
||||||
|
'nondisruptive': True,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
self.mock_object(share_api.API, 'share_server_migration_start',
|
||||||
|
mock.Mock(side_effect=api_exception))
|
||||||
|
self.mock_object(db_api, 'share_server_get',
|
||||||
|
mock.Mock(return_value=server))
|
||||||
|
|
||||||
|
self.assertRaises(expected_exception,
|
||||||
|
self.controller.share_server_migration_start,
|
||||||
|
req, server['id'], body)
|
||||||
|
|
||||||
|
db_api.share_server_get.assert_called_once_with(context,
|
||||||
|
server['id'])
|
||||||
|
migration_start_params = body['migration_start']
|
||||||
|
share_api.API.share_server_migration_start.assert_called_once_with(
|
||||||
|
context, server, migration_start_params['host'],
|
||||||
|
migration_start_params['writable'],
|
||||||
|
migration_start_params['nondisruptive'],
|
||||||
|
migration_start_params['preserve_snapshots'],
|
||||||
|
new_share_network=None)
|
||||||
|
|
||||||
|
@ddt.data('host', 'body')
|
||||||
|
def test_share_server_migration_start_missing_mandatory(self, param):
|
||||||
|
server = db_utils.create_share_server(
|
||||||
|
id='fake_server_id', status=constants.STATUS_ACTIVE)
|
||||||
|
req = self._get_server_migration_request(server['id'])
|
||||||
|
context = req.environ['manila.context']
|
||||||
|
|
||||||
|
body = {
|
||||||
|
'migration_start': {
|
||||||
|
'host': 'fake_host',
|
||||||
|
'preserve_metadata': True,
|
||||||
|
'preserve_snapshots': True,
|
||||||
|
'writable': True,
|
||||||
|
'nondisruptive': True,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if param == 'body':
|
||||||
|
body.pop('migration_start')
|
||||||
|
else:
|
||||||
|
body['migration_start'].pop(param)
|
||||||
|
|
||||||
|
method = 'share_server_migration_start'
|
||||||
|
|
||||||
|
self.mock_object(share_api.API, method)
|
||||||
|
self.mock_object(db_api, 'share_server_get',
|
||||||
|
mock.Mock(return_value=server))
|
||||||
|
|
||||||
|
self.assertRaises(webob.exc.HTTPBadRequest,
|
||||||
|
getattr(self.controller, method),
|
||||||
|
req, server['id'], body)
|
||||||
|
db_api.share_server_get.assert_called_once_with(context,
|
||||||
|
server['id'])
|
||||||
|
|
||||||
|
@ddt.data('nondisruptive', 'writable', 'preserve_snapshots')
|
||||||
|
def test_share_server_migration_start_non_boolean(self, param):
|
||||||
|
server = db_utils.create_share_server(
|
||||||
|
id='fake_server_id', status=constants.STATUS_ACTIVE)
|
||||||
|
req = self._get_server_migration_request(server['id'])
|
||||||
|
context = req.environ['manila.context']
|
||||||
|
|
||||||
|
body = {
|
||||||
|
'migration_start': {
|
||||||
|
'host': 'fake_host',
|
||||||
|
'preserve_snapshots': True,
|
||||||
|
'writable': True,
|
||||||
|
'nondisruptive': True,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
body['migration_start'][param] = None
|
||||||
|
|
||||||
|
method = 'share_server_migration_start'
|
||||||
|
|
||||||
|
self.mock_object(share_api.API, method)
|
||||||
|
self.mock_object(db_api, 'share_server_get',
|
||||||
|
mock.Mock(return_value=server))
|
||||||
|
|
||||||
|
self.assertRaises(webob.exc.HTTPBadRequest,
|
||||||
|
getattr(self.controller, method),
|
||||||
|
req, server['id'], body)
|
||||||
|
db_api.share_server_get.assert_called_once_with(context,
|
||||||
|
server['id'])
|
||||||
|
|
||||||
|
def test_share_server_migration_start_share_server_not_found(self):
|
||||||
|
fake_id = 'fake_server_id'
|
||||||
|
req = self._get_server_migration_request(fake_id)
|
||||||
|
context = req.environ['manila.context']
|
||||||
|
|
||||||
|
body = {'migration_start': {'host': 'fake_host'}}
|
||||||
|
|
||||||
|
self.mock_object(db_api, 'share_server_get',
|
||||||
|
mock.Mock(side_effect=exception.ShareServerNotFound(
|
||||||
|
share_server_id=fake_id)))
|
||||||
|
|
||||||
|
self.assertRaises(webob.exc.HTTPNotFound,
|
||||||
|
self.controller.share_server_migration_start,
|
||||||
|
req, fake_id, body)
|
||||||
|
db_api.share_server_get.assert_called_once_with(context,
|
||||||
|
fake_id)
|
||||||
|
|
||||||
|
def test_share_server_migration_start_new_share_network_not_found(self):
|
||||||
|
server = db_utils.create_share_server(
|
||||||
|
id='fake_server_id', status=constants.STATUS_ACTIVE)
|
||||||
|
req = self._get_server_migration_request(server['id'])
|
||||||
|
context = req.environ['manila.context']
|
||||||
|
|
||||||
|
body = {
|
||||||
|
'migration_start': {
|
||||||
|
'host': 'fake_host',
|
||||||
|
'preserve_metadata': True,
|
||||||
|
'preserve_snapshots': True,
|
||||||
|
'writable': True,
|
||||||
|
'nondisruptive': True,
|
||||||
|
'new_share_network_id': 'nonexistent'}}
|
||||||
|
|
||||||
|
self.mock_object(db_api, 'share_network_get',
|
||||||
|
mock.Mock(side_effect=exception.NotFound()))
|
||||||
|
self.mock_object(db_api, 'share_server_get',
|
||||||
|
mock.Mock(return_value=server))
|
||||||
|
|
||||||
|
self.assertRaises(webob.exc.HTTPBadRequest,
|
||||||
|
self.controller.share_server_migration_start,
|
||||||
|
req, server['id'], body)
|
||||||
|
db_api.share_network_get.assert_called_once_with(context,
|
||||||
|
'nonexistent')
|
||||||
|
db_api.share_server_get.assert_called_once_with(context,
|
||||||
|
server['id'])
|
||||||
|
|
||||||
|
def test_share_server_migration_start_host_with_pool(self):
|
||||||
|
server = db_utils.create_share_server(id='fake_server_id',
|
||||||
|
status=constants.STATUS_ACTIVE)
|
||||||
|
req = self._get_server_migration_request(server['id'])
|
||||||
|
|
||||||
|
body = {
|
||||||
|
'migration_start': {
|
||||||
|
'host': 'fake_host@fakebackend#pool',
|
||||||
|
'preserve_snapshots': True,
|
||||||
|
'writable': True,
|
||||||
|
'nondisruptive': True,
|
||||||
|
'new_share_network_id': 'fake_net_id',
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
self.assertRaises(webob.exc.HTTPBadRequest,
|
||||||
|
self.controller.share_server_migration_start,
|
||||||
|
req, server['id'], body)
|
||||||
|
|
||||||
|
def test_share_server_migration_check_host_with_pool(self):
|
||||||
|
server = db_utils.create_share_server(id='fake_server_id',
|
||||||
|
status=constants.STATUS_ACTIVE)
|
||||||
|
req = self._get_server_migration_request(server['id'])
|
||||||
|
|
||||||
|
body = {
|
||||||
|
'migration_start': {
|
||||||
|
'host': 'fake_host@fakebackend#pool',
|
||||||
|
'preserve_snapshots': True,
|
||||||
|
'writable': True,
|
||||||
|
'nondisruptive': True,
|
||||||
|
'new_share_network_id': 'fake_net_id',
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
self.assertRaises(webob.exc.HTTPBadRequest,
|
||||||
|
self.controller.share_server_migration_check,
|
||||||
|
req, server['id'], body)
|
||||||
|
|
||||||
|
@ddt.data(constants.TASK_STATE_MIGRATION_ERROR, None)
|
||||||
|
def test_reset_task_state(self, task_state):
|
||||||
|
server = db_utils.create_share_server(
|
||||||
|
id='fake_server_id', status=constants.STATUS_ACTIVE)
|
||||||
|
req = self._get_server_migration_request(server['id'])
|
||||||
|
|
||||||
|
update = {'task_state': task_state}
|
||||||
|
body = {'reset_task_state': update}
|
||||||
|
|
||||||
|
self.mock_object(db_api, 'share_server_update')
|
||||||
|
|
||||||
|
response = self.controller.share_server_reset_task_state(
|
||||||
|
req, server['id'], body)
|
||||||
|
|
||||||
|
self.assertEqual(202, response.status_int)
|
||||||
|
|
||||||
|
db_api.share_server_update.assert_called_once_with(utils.IsAMatcher(
|
||||||
|
ctx_api.RequestContext), server['id'], update)
|
||||||
|
|
||||||
|
def test_reset_task_state_error_body(self):
|
||||||
|
server = db_utils.create_share_server(
|
||||||
|
id='fake_server_id', status=constants.STATUS_ACTIVE)
|
||||||
|
req = self._get_server_migration_request(server['id'])
|
||||||
|
|
||||||
|
update = {'error': 'error'}
|
||||||
|
body = {'reset_task_state': update}
|
||||||
|
|
||||||
|
self.assertRaises(webob.exc.HTTPBadRequest,
|
||||||
|
self.controller.share_server_reset_task_state,
|
||||||
|
req, server['id'], body)
|
||||||
|
|
||||||
|
def test_reset_task_state_error_invalid(self):
|
||||||
|
server = db_utils.create_share_server(
|
||||||
|
id='fake_server_id', status=constants.STATUS_ACTIVE)
|
||||||
|
req = self._get_server_migration_request(server['id'])
|
||||||
|
|
||||||
|
update = {'task_state': 'error'}
|
||||||
|
        body = {'reset_task_state': update}

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.share_server_reset_task_state,
                          req, server['id'], body)

    def test_reset_task_state_not_found(self):
        server = db_utils.create_share_server(
            id='fake_server_id', status=constants.STATUS_ACTIVE)
        req = self._get_server_migration_request(server['id'])

        update = {'task_state': constants.TASK_STATE_MIGRATION_ERROR}
        body = {'reset_task_state': update}

        self.mock_object(db_api, 'share_server_update',
                         mock.Mock(side_effect=exception.ShareServerNotFound(
                             share_server_id='fake_server_id')))

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.share_server_reset_task_state,
                          req, server['id'], body)

        db_api.share_server_update.assert_called_once_with(utils.IsAMatcher(
            ctx_api.RequestContext), server['id'], update)

    def test_share_server_migration_complete(self):
        server = db_utils.create_share_server(
            id='fake_server_id', status=constants.STATUS_ACTIVE)
        req = self._get_server_migration_request(server['id'])
        context = req.environ['manila.context']

        body = {'migration_complete': None}
        api_return = {
            'destination_share_server_id': 'fake_destination_id'
        }

        self.mock_object(share_api.API, 'share_server_migration_complete',
                         mock.Mock(return_value=api_return))
        self.mock_object(db_api, 'share_server_get',
                         mock.Mock(return_value=server))

        result = self.controller.share_server_migration_complete(
            req, server['id'], body)

        self.assertEqual(api_return, result)
        share_api.API.share_server_migration_complete.assert_called_once_with(
            utils.IsAMatcher(ctx_api.RequestContext), server)
        db_api.share_server_get.assert_called_once_with(context,
                                                        server['id'])

    def test_share_server_migration_complete_not_found(self):
        fake_id = 'fake_server_id'
        req = self._get_server_migration_request(fake_id)
        context = req.environ['manila.context']

        body = {'migration_complete': None}

        self.mock_object(db_api, 'share_server_get',
                         mock.Mock(side_effect=exception.ShareServerNotFound(
                             share_server_id=fake_id)))
        self.mock_object(share_api.API, 'share_server_migration_complete')

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.share_server_migration_complete,
                          req, fake_id, body)
        db_api.share_server_get.assert_called_once_with(context,
                                                        fake_id)

    @ddt.data({'api_exception': exception.ServiceIsDown(service='fake_srv'),
               'expected_exception': webob.exc.HTTPBadRequest},
              {'api_exception': exception.InvalidShareServer(reason=""),
               'expected_exception': webob.exc.HTTPBadRequest})
    @ddt.unpack
    def test_share_server_migration_complete_exceptions(self, api_exception,
                                                        expected_exception):
        fake_id = 'fake_server_id'
        req = self._get_server_migration_request(fake_id)
        context = req.environ['manila.context']
        body = {'migration_complete': None}
        self.mock_object(db_api, 'share_server_get',
                         mock.Mock(return_value='fake_share_server'))
        self.mock_object(share_api.API, 'share_server_migration_complete',
                         mock.Mock(side_effect=api_exception))

        self.assertRaises(expected_exception,
                          self.controller.share_server_migration_complete,
                          req, fake_id, body)

        db_api.share_server_get.assert_called_once_with(context,
                                                        fake_id)
        share_api.API.share_server_migration_complete.assert_called_once_with(
            context, 'fake_share_server')

    def test_share_server_migration_cancel(self):
        server = db_utils.create_share_server(
            id='fake_server_id', status=constants.STATUS_ACTIVE)
        req = self._get_server_migration_request(server['id'])
        context = req.environ['manila.context']

        body = {'migration_cancel': None}

        self.mock_object(db_api, 'share_server_get',
                         mock.Mock(return_value=server))
        self.mock_object(share_api.API, 'share_server_migration_cancel')

        self.controller.share_server_migration_cancel(
            req, server['id'], body)

        share_api.API.share_server_migration_cancel.assert_called_once_with(
            utils.IsAMatcher(ctx_api.RequestContext), server)
        db_api.share_server_get.assert_called_once_with(context,
                                                        server['id'])

    def test_share_server_migration_cancel_not_found(self):
        fake_id = 'fake_server_id'
        req = self._get_server_migration_request(fake_id)
        context = req.environ['manila.context']

        body = {'migration_cancel': None}

        self.mock_object(db_api, 'share_server_get',
                         mock.Mock(side_effect=exception.ShareServerNotFound(
                             share_server_id=fake_id)))
        self.mock_object(share_api.API, 'share_server_migration_cancel')

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.share_server_migration_cancel,
                          req, fake_id, body)
        db_api.share_server_get.assert_called_once_with(context,
                                                        fake_id)

    @ddt.data({'api_exception': exception.ServiceIsDown(service='fake_srv'),
               'expected_exception': webob.exc.HTTPBadRequest},
              {'api_exception': exception.InvalidShareServer(reason=""),
               'expected_exception': webob.exc.HTTPBadRequest})
    @ddt.unpack
    def test_share_server_migration_cancel_exceptions(self, api_exception,
                                                      expected_exception):
        fake_id = 'fake_server_id'
        req = self._get_server_migration_request(fake_id)
        context = req.environ['manila.context']
        body = {'migration_complete': None}
        self.mock_object(db_api, 'share_server_get',
                         mock.Mock(return_value='fake_share_server'))
        self.mock_object(share_api.API, 'share_server_migration_cancel',
                         mock.Mock(side_effect=api_exception))

        self.assertRaises(expected_exception,
                          self.controller.share_server_migration_cancel,
                          req, fake_id, body)

        db_api.share_server_get.assert_called_once_with(context,
                                                        fake_id)
        share_api.API.share_server_migration_cancel.assert_called_once_with(
            context, 'fake_share_server')
    def test_share_server_migration_get_progress(self):
        server = db_utils.create_share_server(
            id='fake_server_id',
            status=constants.STATUS_ACTIVE,
            task_state=constants.TASK_STATE_MIGRATION_SUCCESS)
        req = self._get_server_migration_request(server['id'])

        body = {'migration_get_progress': None}
        expected = {
            'total_progress': 'fake',
            'task_state': constants.TASK_STATE_MIGRATION_SUCCESS,
            'destination_share_server_id': 'fake_destination_server_id'
        }

        self.mock_object(share_api.API, 'share_server_migration_get_progress',
                         mock.Mock(return_value=expected))

        response = self.controller.share_server_migration_get_progress(
            req, server['id'], body)
        self.assertEqual(expected, response)
        (share_api.API.share_server_migration_get_progress.
            assert_called_once_with(utils.IsAMatcher(ctx_api.RequestContext),
                                    server['id']))

    @ddt.data({'api_exception': exception.ServiceIsDown(service='fake_srv'),
               'expected_exception': webob.exc.HTTPConflict},
              {'api_exception': exception.InvalidShareServer(reason=""),
               'expected_exception': webob.exc.HTTPBadRequest})
    @ddt.unpack
    def test_share_server_migration_get_progress_exceptions(
            self, api_exception, expected_exception):
        fake_id = 'fake_server_id'
        req = self._get_server_migration_request(fake_id)
        context = req.environ['manila.context']
        body = {'migration_complete': None}
        self.mock_object(db_api, 'share_server_get',
                         mock.Mock(return_value='fake_share_server'))
        mock_get_progress = self.mock_object(
            share_api.API, 'share_server_migration_get_progress',
            mock.Mock(side_effect=api_exception))

        self.assertRaises(expected_exception,
                          self.controller.share_server_migration_get_progress,
                          req, fake_id, body)

        mock_get_progress.assert_called_once_with(context, fake_id)
    def test_share_server_migration_check(self):
        fake_id = 'fake_server_id'
        fake_share_server = db_utils.create_share_server(id=fake_id)
        fake_share_network = db_utils.create_share_network()
        req = self._get_server_migration_request(fake_id)
        context = req.environ['manila.context']
        requested_writable = False
        requested_nondisruptive = False
        requested_preserve_snapshots = False
        fake_host = 'fakehost@fakebackend'
        body = {
            'migration_check': {
                'writable': requested_writable,
                'nondisruptive': requested_nondisruptive,
                'preserve_snapshots': requested_preserve_snapshots,
                'new_share_network_id': fake_share_network['id'],
                'host': fake_host
            }
        }
        driver_result = {
            'compatible': False,
            'writable': False,
            'nondisruptive': True,
            'preserve_snapshots': False,
            'share_network_id': 'fake_network_uuid',
            'migration_cancel': False,
            'migration_get_progress': False,
        }

        mock_server_get = self.mock_object(
            db_api, 'share_server_get',
            mock.Mock(return_value=fake_share_server))
        mock_network_get = self.mock_object(
            db_api, 'share_network_get',
            mock.Mock(return_value=fake_share_network))
        mock_migration_check = self.mock_object(
            share_api.API, 'share_server_migration_check',
            mock.Mock(return_value=driver_result))

        result = self.controller.share_server_migration_check(
            req, fake_id, body)

        expected_result_keys = ['compatible', 'requested_capabilities',
                                'supported_capabilities']
        [self.assertIn(key, result) for key in expected_result_keys]
        mock_server_get.assert_called_once_with(
            context, fake_share_server['id'])
        mock_network_get.assert_called_once_with(
            context, fake_share_network['id'])
        mock_migration_check.assert_called_once_with(
            context, fake_share_server, fake_host, requested_writable,
            requested_nondisruptive, requested_preserve_snapshots,
            new_share_network=fake_share_network)

    @ddt.data(
        (webob.exc.HTTPNotFound, True, False, {'migration_check': {}}),
        (webob.exc.HTTPBadRequest, False, True,
         {'migration_check': {'new_share_network_id': 'fake_id'}}),
        (webob.exc.HTTPBadRequest, False, False, None)
    )
    @ddt.unpack
    def test_share_server_migration_check_exception(
            self, exception_to_raise, raise_server_get_exception,
            raise_network_get_action, body):
        req = self._get_server_migration_request('fake_id')
        context = req.environ['manila.context']
        if body:
            body['migration_check']['writable'] = False
            body['migration_check']['nondisruptive'] = False
            body['migration_check']['preserve_snapshots'] = False
            body['migration_check']['host'] = 'fakehost@fakebackend'
        else:
            body = {}

        server_get = mock.Mock()
        network_get = mock.Mock()
        if raise_server_get_exception:
            server_get = mock.Mock(
                side_effect=exception.ShareServerNotFound(
                    share_server_id='fake'))
        if raise_network_get_action:
            network_get = mock.Mock(
                side_effect=exception.ShareNetworkNotFound(
                    share_network_id='fake'))

        mock_server_get = self.mock_object(
            db_api, 'share_server_get', server_get)

        mock_network_get = self.mock_object(
            db_api, 'share_network_get', network_get)

        self.assertRaises(
            exception_to_raise,
            self.controller.share_server_migration_check,
            req, 'fake_id', body
        )
        mock_server_get.assert_called_once_with(
            context, 'fake_id')
        if raise_network_get_action:
            mock_network_get.assert_called_once_with(context, 'fake_id')

    @ddt.data(
        {'api_exception': exception.ServiceIsDown(service='fake_srv'),
         'expected_exception': webob.exc.HTTPBadRequest},
        {'api_exception': exception.InvalidShareServer(reason=""),
         'expected_exception': webob.exc.HTTPConflict})
    @ddt.unpack
    def test_share_server_migration_complete_exceptions_from_api(
            self, api_exception, expected_exception):
        req = self._get_server_migration_request('fake_id')
        context = req.environ['manila.context']
        body = {
            'migration_check': {
                'writable': False,
                'nondisruptive': False,
                'preserve_snapshots': True,
                'host': 'fakehost@fakebackend',
            }
        }

        self.mock_object(db_api, 'share_server_get',
                         mock.Mock(return_value='fake_share_server'))

        self.mock_object(share_api.API, 'share_server_migration_check',
                         mock.Mock(side_effect=api_exception))

        self.assertRaises(
            expected_exception,
            self.controller.share_server_migration_check,
            req, 'fake_id', body
        )

        db_api.share_server_get.assert_called_once_with(context,
                                                        'fake_id')
        migration_check_params = body['migration_check']
        share_api.API.share_server_migration_check.assert_called_once_with(
            context, 'fake_share_server', migration_check_params['host'],
            migration_check_params['writable'],
            migration_check_params['nondisruptive'],
            migration_check_params['preserve_snapshots'],
            new_share_network=None)
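The migration_check tests above also pin down the response contract: the controller reports overall compatibility alongside the capabilities that were requested and the ones the destination driver actually supports. The dict below is only an illustrative sketch of that shape, assembled from the keys asserted in test_share_server_migration_check; the exact nesting of the two capability groups is an assumption, not text from this change.

# Illustrative response for the share server migration_check action.
# Top-level keys come from expected_result_keys in the test above; the
# capability names come from the request body and driver_result dicts.
migration_check_response = {
    'compatible': False,
    'requested_capabilities': {
        'writable': False,
        'nondisruptive': False,
        'preserve_snapshots': False,
        'new_share_network_id': 'fake_network_uuid',
        'host': 'fakehost@fakebackend',
    },
    'supported_capabilities': {
        'writable': False,
        'nondisruptive': True,
        'preserve_snapshots': False,
        'migration_cancel': False,
        'migration_get_progress': False,
        'share_network_id': 'fake_network_uuid',
    },
}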
@ -2946,3 +2946,29 @@ class ShareInstanceProgressFieldChecks(BaseMigrationChecks):
        for si_record in engine.execute(si_table.select()):
            self.test_case.assertFalse(hasattr(si_record,
                                               self.progress_field_name))


@map_to_migration('5aa813ae673d')
class ShareServerTaskState(BaseMigrationChecks):

    def setup_upgrade_data(self, engine):
        # Create share server
        share_server_data = {
            'id': uuidutils.generate_uuid(),
            'host': 'fake_host',
            'status': 'active',
        }
        ss_table = utils.load_table('share_servers', engine)
        engine.execute(ss_table.insert(share_server_data))

    def check_upgrade(self, engine, data):
        ss_table = utils.load_table('share_servers', engine)
        for ss in engine.execute(ss_table.select()):
            self.test_case.assertTrue(hasattr(ss, 'task_state'))
            self.test_case.assertTrue(hasattr(ss, 'source_share_server_id'))

    def check_downgrade(self, engine):
        ss_table = utils.load_table('share_servers', engine)
        for ss in engine.execute(ss_table.select()):
            self.test_case.assertFalse(hasattr(ss, 'task_state'))
            self.test_case.assertFalse(hasattr(ss, 'source_share_server_id'))
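ShareServerTaskState only verifies that the '5aa813ae673d' revision adds task_state and source_share_server_id to the share_servers table and that a downgrade removes them again; the revision module itself is not part of this hunk. A hypothetical alembic sketch that would satisfy these checks is shown below; the column types and the down_revision value are assumptions, not copied from the real migration.

# Hypothetical sketch of an alembic revision satisfying the checks above.
# Column types and the parent revision are assumptions for illustration.
from alembic import op
import sqlalchemy as sa

revision = '5aa813ae673d'
down_revision = None  # placeholder; the real parent revision is not shown


def upgrade():
    # The upgrade check asserts both columns exist on share_servers rows.
    op.add_column('share_servers',
                  sa.Column('task_state', sa.String(length=255),
                            nullable=True))
    op.add_column('share_servers',
                  sa.Column('source_share_server_id', sa.String(length=36),
                            nullable=True))


def downgrade():
    # The downgrade check asserts both columns are gone again.
    op.drop_column('share_servers', 'task_state')
    op.drop_column('share_servers', 'source_share_server_id')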
@ -507,6 +507,23 @@ class ShareDatabaseAPITestCase(test.TestCase):
        self.assertEqual('share-%s' % instance['id'], instance['name'])

    def test_share_instance_get_all_by_ids(self):
        fake_share = db_utils.create_share()
        expected_share_instance = db_utils.create_share_instance(
            share_id=fake_share['id'])

        # Populate the db with a dummy share
        db_utils.create_share_instance(share_id=fake_share['id'])

        instances = db_api.share_instances_get_all(
            self.ctxt,
            filters={'instance_ids': [expected_share_instance['id']]})

        self.assertEqual(1, len(instances))
        instance = instances[0]

        self.assertEqual('share-%s' % instance['id'], instance['name'])

    @ddt.data('host', 'share_group_id')
    def test_share_get_all_sort_by_share_instance_fields(self, sort_key):
        shares = [db_utils.create_share(**{sort_key: n, 'size': 1})

@ -1699,6 +1716,33 @@ class ShareSnapshotDatabaseAPITestCase(test.TestCase):
            self.assertEqual(self.snapshot_instance_export_locations[0][key],
                             out[0][key])

    def test_share_snapshot_instance_export_locations_update(self):
        snapshot = db_utils.create_snapshot(with_share=True)
        initial_locations = ['fake1/1/', 'fake2/2', 'fake3/3']
        update_locations = ['fake4/4', 'fake2/2', 'fake3/3']

        # add initial locations
        db_api.share_snapshot_instance_export_locations_update(
            self.ctxt, snapshot.instance['id'], initial_locations, False)
        # update locations
        db_api.share_snapshot_instance_export_locations_update(
            self.ctxt, snapshot.instance['id'], update_locations, True)

        get_result = db_api.share_snapshot_instance_export_locations_get_all(
            self.ctxt, snapshot.instance['id'])
        result_locations = [el['path'] for el in get_result]

        self.assertEqual(sorted(result_locations), sorted(update_locations))

    def test_share_snapshot_instance_export_locations_update_wrong_type(self):
        snapshot = db_utils.create_snapshot(with_share=True)
        new_export_locations = [1]

        self.assertRaises(
            exception.ManilaException,
            db_api.share_snapshot_instance_export_locations_update,
            self.ctxt, snapshot.instance['id'], new_export_locations, False)


class ShareExportLocationsDatabaseAPITestCase(test.TestCase):

@ -3024,6 +3068,32 @@ class ShareServerDatabaseAPITestCase(test.TestCase):
            self.ctxt, host, updated_before)
        self.assertEqual(expected_len, len(unused_deletable))

    @ddt.data({'host': 'fakepool@fakehost'},
              {'status': constants.STATUS_SERVER_MIGRATING_TO},
              {'source_share_server_id': 'fake_ss_id'})
    def test_share_server_get_all_with_filters(self, filters):
        db_utils.create_share_server(**filters)
        db_utils.create_share_server()
        filter_keys = filters.keys()

        results = db_api.share_server_get_all_with_filters(self.ctxt, filters)

        self.assertEqual(1, len(results))
        for result in results:
            for key in filter_keys:
                self.assertEqual(result[key], filters[key])

    @ddt.data('fake@fake', 'host1@backend1')
    def test_share_server_get_all_by_host(self, host):
        db_utils.create_share_server(host='fake@fake')
        db_utils.create_share_server(host='host1@backend1')

        share_servers = db_api.share_server_get_all_by_host(self.ctxt, host)

        self.assertEqual(1, len(share_servers))
        for share_server in share_servers:
            self.assertEqual(host, share_server['host'])


class ServiceDatabaseAPITestCase(test.TestCase):
@ -4018,3 +4088,127 @@ class ShareInstancesTestCase(test.TestCase):
                        si['host'].startswith(new_host)]
        self.assertEqual(actual_updates, expected_updates)
        self.assertEqual(expected_updates, len(host_updates))

    def test_share_instances_status_update(self):
        for i in range(1, 3):
            instances = [db_utils.create_share_instance(
                status=constants.STATUS_SERVER_MIGRATING, share_id='fake')]
        share_instance_ids = [instance['id'] for instance in instances]
        values = {'status': constants.STATUS_AVAILABLE}

        db_api.share_instances_status_update(
            self.context, share_instance_ids, values)

        instances = [
            db_api.share_instance_get(self.context, instance_id)
            for instance_id in share_instance_ids]

        for instance in instances:
            self.assertEqual(constants.STATUS_AVAILABLE, instance['status'])

    def test_share_snapshot_instances_status_update(self):
        share_instance = db_utils.create_share_instance(
            status=constants.STATUS_AVAILABLE, share_id='fake')
        for i in range(1, 3):
            instances = [db_utils.create_snapshot_instance(
                'fake_snapshot_id_1', status=constants.STATUS_CREATING,
                share_instance_id=share_instance['id'])]

        snapshot_instance_ids = [instance['id'] for instance in instances]
        values = {'status': constants.STATUS_AVAILABLE}

        db_api.share_snapshot_instances_status_update(
            self.context, snapshot_instance_ids, values)

        instances = [
            db_api.share_snapshot_instance_get(self.context, instance_id)
            for instance_id in snapshot_instance_ids]

        for instance in instances:
            self.assertEqual(constants.STATUS_AVAILABLE, instance['status'])

    def test_share_and_snapshot_instances_status_update(self):
        share_instance = db_utils.create_share_instance(
            status=constants.STATUS_AVAILABLE, share_id='fake')
        share_instance_ids = [share_instance['id']]
        fake_session = db_api.get_session()
        for i in range(1, 3):
            snap_instances = [db_utils.create_snapshot_instance(
                'fake_snapshot_id_1', status=constants.STATUS_CREATING,
                share_instance_id=share_instance['id'])]

        snapshot_instance_ids = [instance['id'] for instance in snap_instances]
        values = {'status': constants.STATUS_AVAILABLE}

        mock_update_share_instances = self.mock_object(
            db_api, 'share_instances_status_update',
            mock.Mock(return_value=[share_instance]))
        mock_update_snap_instances = self.mock_object(
            db_api, 'share_snapshot_instances_status_update',
            mock.Mock(return_value=snap_instances))
        mock_get_session = self.mock_object(
            db_api, 'get_session', mock.Mock(return_value=fake_session))

        updated_share_instances, updated_snap_instances = (
            db_api.share_and_snapshot_instances_status_update(
                self.context, values, share_instance_ids=share_instance_ids,
                snapshot_instance_ids=snapshot_instance_ids))

        mock_get_session.assert_called()
        mock_update_share_instances.assert_called_once_with(
            self.context, share_instance_ids, values, session=fake_session)
        mock_update_snap_instances.assert_called_once_with(
            self.context, snapshot_instance_ids, values, session=fake_session)
        self.assertEqual(updated_share_instances, [share_instance])
        self.assertEqual(updated_snap_instances, snap_instances)

    @ddt.data(
        {
            'share_instance_status': constants.STATUS_ERROR,
            'snap_instance_status': constants.STATUS_AVAILABLE,
            'expected_exc': exception.InvalidShareInstance
        },
        {
            'share_instance_status': constants.STATUS_AVAILABLE,
            'snap_instance_status': constants.STATUS_ERROR,
            'expected_exc': exception.InvalidShareSnapshotInstance
        }
    )
    @ddt.unpack
    def test_share_and_snapshot_instances_status_update_invalid_status(
            self, share_instance_status, snap_instance_status, expected_exc):
        share_instance = db_utils.create_share_instance(
            status=share_instance_status, share_id='fake')
        share_snapshot_instance = db_utils.create_snapshot_instance(
            'fake_snapshot_id_1', status=snap_instance_status,
            share_instance_id=share_instance['id'])
        share_instance_ids = [share_instance['id']]
        snap_instance_ids = [share_snapshot_instance['id']]
        values = {'status': constants.STATUS_AVAILABLE}
        fake_session = db_api.get_session()

        mock_get_session = self.mock_object(
            db_api, 'get_session', mock.Mock(return_value=fake_session))
        mock_instances_get_all = self.mock_object(
            db_api, 'share_instances_get_all',
            mock.Mock(return_value=[share_instance]))
        mock_snap_instances_get_all = self.mock_object(
            db_api, 'share_snapshot_instance_get_all_with_filters',
            mock.Mock(return_value=[share_snapshot_instance]))

        self.assertRaises(expected_exc,
                          db_api.share_and_snapshot_instances_status_update,
                          self.context,
                          values,
                          share_instance_ids=share_instance_ids,
                          snapshot_instance_ids=snap_instance_ids,
                          current_expected_status=constants.STATUS_AVAILABLE)

        mock_get_session.assert_called()
        mock_instances_get_all.assert_called_once_with(
            self.context, filters={'instance_ids': share_instance_ids},
            session=fake_session)
        if snap_instance_status == constants.STATUS_ERROR:
            mock_snap_instances_get_all.assert_called_once_with(
                self.context, {'instance_ids': snap_instance_ids},
                session=fake_session)
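The combined status-update tests above exercise the public signature of the new helper: one call flips a set of share instances and snapshot instances inside a single session, optionally guarding on an expected current status. A hypothetical caller-side usage, mirroring only the keyword arguments shown in the tests (the facade module and the admin-context helper used here are assumptions):

# Hypothetical usage; argument names mirror the tests above.
from manila.common import constants
from manila import context
from manila.db import api as db_api

ctxt = context.get_admin_context()
db_api.share_and_snapshot_instances_status_update(
    ctxt,
    {'status': constants.STATUS_AVAILABLE},
    share_instance_ids=['<share-instance-uuid>'],
    snapshot_instance_ids=['<snapshot-instance-uuid>'],
    # Fails with InvalidShareInstance / InvalidShareSnapshotInstance if any
    # instance is not currently in the expected status.
    current_expected_status=constants.STATUS_SERVER_MIGRATING)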
@ -42,6 +42,7 @@ def fake_share(**kwargs):
        'id': 'fake_share_instance_id',
        'host': 'fakehost',
        'share_type_id': '1',
        'share_network_id': 'fake share network id',
    },
    'mount_snapshot_support': False,
}
[File diff suppressed because it is too large]
@ -581,6 +581,64 @@ class ShareDriverTestCase(test.TestCase):
                          share_driver.migration_get_progress,
                          None, None, None, None, None, None, None)

    def test_share_server_migration_start(self):
        driver.CONF.set_default('driver_handles_share_servers', True)
        share_driver = driver.ShareDriver(True)

        self.assertRaises(NotImplementedError,
                          share_driver.share_server_migration_start,
                          None, None, None, None, None)

    def test_share_server_migration_continue(self):
        driver.CONF.set_default('driver_handles_share_servers', True)
        share_driver = driver.ShareDriver(True)

        self.assertRaises(NotImplementedError,
                          share_driver.share_server_migration_continue,
                          None, None, None, None, None)

    def test_share_server_migration_get_progress(self):
        driver.CONF.set_default('driver_handles_share_servers', True)
        share_driver = driver.ShareDriver(True)

        self.assertRaises(NotImplementedError,
                          share_driver.share_server_migration_get_progress,
                          None, None, None, None, None)

    def test_share_server_migration_cancel(self):
        driver.CONF.set_default('driver_handles_share_servers', True)
        share_driver = driver.ShareDriver(True)

        self.assertRaises(NotImplementedError,
                          share_driver.share_server_migration_cancel,
                          None, None, None, None, None)

    def test_share_server_migration_check_compatibility(self):
        driver.CONF.set_default('driver_handles_share_servers', True)
        share_driver = driver.ShareDriver(True)
        expected_compatibility = {
            'compatible': False,
            'writable': False,
            'nondisruptive': False,
            'preserve_snapshots': False,
            'migration_cancel': False,
            'migration_get_progress': False
        }

        driver_compatibility = (
            share_driver.share_server_migration_check_compatibility(
                None, None, None, None, None, None))
        self.assertEqual(expected_compatibility, driver_compatibility)

    def test_share_server_migration_complete(self):
        driver.CONF.set_default('driver_handles_share_servers', True)
        share_driver = driver.ShareDriver(True)

        self.assertRaises(
            NotImplementedError,
            share_driver.share_server_migration_complete,
            None, None, None, None, None, None)

    @ddt.data(True, False)
    def test_connection_get_info(self, admin):
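These base-driver tests establish the default contract: every share server migration lifecycle method raises NotImplementedError, and the compatibility check reports everything as unsupported. A backend that opts in would override at least the compatibility check. The class below is a hypothetical illustration; the parameter names follow the six positional arguments the test passes and are not taken from this hunk.

# Hypothetical driver override; not a driver shipped with this change.
from manila.share import driver


class FakeMigratingDriver(driver.ShareDriver):

    def share_server_migration_check_compatibility(
            self, context, share_server, dest_host, old_share_network,
            new_share_network, shares_request_spec):
        # Advertise what this backend can actually honor; the keys mirror
        # the expected_compatibility dict asserted in the test above.
        return {
            'compatible': True,
            'writable': True,
            'nondisruptive': False,
            'preserve_snapshots': True,
            'migration_cancel': True,
            'migration_get_progress': False,
        }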
[File diff suppressed because it is too large]
@ -25,6 +25,7 @@ from manila import exception
 from manila.share import access as access_helper
 from manila.share import api as share_api
 from manila.share import migration
+from manila.share import rpcapi as share_rpcapi
 from manila import test
 from manila.tests import db_utils
 from manila import utils
@ -43,7 +44,7 @@ class ShareMigrationHelperTestCase(test.TestCase):
         self.access_helper = access_helper.ShareInstanceAccess(db, None)
         self.context = context.get_admin_context()
         self.helper = migration.ShareMigrationHelper(
-            self.context, db, self.share, self.access_helper)
+            self.context, db, self.access_helper)

     def test_delete_instance_and_wait(self):
@ -250,10 +251,12 @@ class ShareMigrationHelperTestCase(test.TestCase):
         # asserts
         db.share_server_get.assert_called_with(self.context, 'fake_server_id')

-    def test_revert_access_rules(self):
+    @ddt.data(None, 'fakehost@fakebackend')
+    def test_revert_access_rules(self, dest_host):

         share_instance = db_utils.create_share_instance(
             share_id=self.share['id'], status=constants.STATUS_AVAILABLE)
+        share_instance_ids = [instance['id'] for instance in [share_instance]]

         access = db_utils.create_access(share_id=self.share['id'],
                                         access_to='fake_ip',
@ -266,16 +269,23 @@ class ShareMigrationHelperTestCase(test.TestCase):
         get_and_update_call = self.mock_object(
             self.access_helper, 'get_and_update_share_instance_access_rules',
             mock.Mock(return_value=[access]))
+        mock_update_access_for_instances = self.mock_object(
+            share_rpcapi.ShareAPI, 'update_access_for_instances')
+
         # run
-        self.helper.revert_access_rules(share_instance, server)
+        self.helper.revert_access_rules([share_instance], server,
+                                        dest_host=dest_host)

         # asserts
         get_and_update_call.assert_called_once_with(
             self.context, share_instance_id=share_instance['id'],
             updates={'state': constants.ACCESS_STATE_QUEUED_TO_APPLY})
-        self.access_helper.update_access_rules.assert_called_once_with(
-            self.context, share_instance['id'], share_server=server)
+        if dest_host:
+            mock_update_access_for_instances.assert_called_once_with(
+                self.context, dest_host, share_instance_ids, server)
+        else:
+            self.access_helper.update_access_rules.assert_called_once_with(
+                self.context, share_instance['id'], share_server=server)

     @ddt.data(True, False)
     def test_apply_new_access_rules_there_are_rules(self, prior_rules):
@ -297,7 +307,8 @@ class ShareMigrationHelperTestCase(test.TestCase):
         self.mock_object(utils, 'wait_for_access_update')

         # run
-        self.helper.apply_new_access_rules(new_share_instance)
+        self.helper.apply_new_access_rules(new_share_instance,
+                                           self.share['id'])

         # asserts
         db.share_instance_access_copy.assert_called_once_with(
@ -346,7 +357,7 @@ class ShareMigrationHelperTestCase(test.TestCase):

         # asserts
         self.helper.revert_access_rules.assert_called_once_with(
-            self.share_instance, server)
+            self.share_instance, server, None)

         if exc:
             self.assertEqual(1, migration.LOG.warning.call_count)
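The reworked test_revert_access_rules fixes the helper's new routing rule: rules are first re-queued through the access helper, then either pushed over RPC to the destination host (when one is given) or re-applied locally as before. A condensed, hypothetical sketch of that branch, using only the attribute and module names visible in the assertions above:

# Hypothetical sketch of the branching verified by test_revert_access_rules;
# not the literal body of ShareMigrationHelper.revert_access_rules.
from manila.common import constants
from manila.share import rpcapi as share_rpcapi


def revert_access_rules(context, access_helper, share_instances,
                        share_server, dest_host=None):
    share_instance_ids = [instance['id'] for instance in share_instances]
    for instance_id in share_instance_ids:
        # Queue the rules so they are re-applied on whichever side ends up
        # owning the share instances.
        access_helper.get_and_update_share_instance_access_rules(
            context, share_instance_id=instance_id,
            updates={'state': constants.ACCESS_STATE_QUEUED_TO_APPLY})

    if dest_host:
        # Let the destination backend apply the rules for all instances.
        share_rpcapi.ShareAPI().update_access_for_instances(
            context, dest_host, share_instance_ids, share_server)
    else:
        for instance_id in share_instance_ids:
            access_helper.update_access_rules(
                context, instance_id, share_server=share_server)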
@ -44,10 +44,10 @@ class ShareRpcAPITestCase(test.TestCase):
             share_id='fake_share_id',
             host='fake_host',
         )
-        share_server = db_utils.create_share_server()
         share_group = {'id': 'fake_share_group_id', 'host': 'fake_host'}
         share_group_snapshot = {'id': 'fake_share_group_id'}
         host = 'fake_host'
+        share_server = db_utils.create_share_server(host=host)
         self.fake_share = jsonutils.to_primitive(share)
         # mock out the getattr on the share db model object since jsonutils
         # doesn't know about those extra attributes to pull in
@ -115,11 +115,25 @@ class ShareRpcAPITestCase(test.TestCase):
         if 'snapshot_instance' in expected_msg:
             snapshot_instance = expected_msg.pop('snapshot_instance', None)
             expected_msg['snapshot_instance_id'] = snapshot_instance['id']
+        share_server_id_methods = [
+            'manage_share_server', 'unmanage_share_server',
+            'share_server_migration_start', 'share_server_migration_check']
+        src_dest_share_server_methods = [
+            'share_server_migration_cancel',
+            'share_server_migration_get_progress',
+            'share_server_migration_complete']
         if ('share_server' in expected_msg
-                and (method == 'manage_share_server')
-                or method == 'unmanage_share_server'):
+                and method in share_server_id_methods):
             share_server = expected_msg.pop('share_server', None)
             expected_msg['share_server_id'] = share_server['id']
+        if ('share_server' in expected_msg
+                and method in src_dest_share_server_methods):
+            share_server = expected_msg.pop('share_server', None)
+            expected_msg['src_share_server_id'] = share_server['id']
+        if ('dest_share_server' in expected_msg
+                and method in src_dest_share_server_methods):
+            share_server = expected_msg.pop('dest_share_server', None)
+            expected_msg['dest_share_server_id'] = share_server['id']

         if 'host' in kwargs:
             host = kwargs['host']
@ -388,3 +402,58 @@ class ShareRpcAPITestCase(test.TestCase):
                              version='1.17',
                              snapshot_instance=self.fake_snapshot[
                                  'share_instance'])
+
+    def test_share_server_migration_start(self):
+        self._test_share_api('share_server_migration_start',
+                             rpc_method='cast',
+                             version='1.21',
+                             share_server=self.fake_share_server,
+                             dest_host=self.fake_host,
+                             writable=True,
+                             nondisruptive=False,
+                             preserve_snapshots=True,
+                             new_share_network_id='fake_share_network_id')
+
+    def test_share_server_migration_check(self):
+        self._test_share_api('share_server_migration_check',
+                             rpc_method='call',
+                             version='1.21',
+                             share_server_id=self.fake_share_server['id'],
+                             dest_host=self.fake_host,
+                             writable=True,
+                             nondisruptive=False,
+                             preserve_snapshots=True,
+                             new_share_network_id='fake_net_id')
+
+    def test_share_server_migration_cancel(self):
+        self._test_share_api('share_server_migration_cancel',
+                             rpc_method='cast',
+                             version='1.21',
+                             dest_host=self.fake_host,
+                             share_server=self.fake_share_server,
+                             dest_share_server=self.fake_share_server)
+
+    def test_share_server_migration_get_progress(self):
+        self._test_share_api('share_server_migration_get_progress',
+                             rpc_method='call',
+                             version='1.21',
+                             dest_host=self.fake_host,
+                             share_server=self.fake_share_server,
+                             dest_share_server=self.fake_share_server)
+
+    def test_share_server_migration_complete(self):
+        self._test_share_api('share_server_migration_complete',
+                             rpc_method='cast',
+                             version='1.21',
+                             dest_host=self.fake_host,
+                             share_server=self.fake_share_server,
+                             dest_share_server=self.fake_share_server)
+
+    def test_update_access_for_share_instances(self):
+        self._test_share_api(
+            'update_access_for_instances',
+            rpc_method='cast',
+            version='1.21',
+            dest_host=self.fake_host,
+            share_instance_ids=[self.fake_share['instance']['id']],
+            share_server_id=self.fake_share_server['id'])
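The new rpcapi tests pin the RPC contract at version 1.21: start, cancel and complete are casts, check and get_progress are calls, and share server objects are flattened to IDs on the wire (share_server_id for start/check, src/dest share server IDs for the others). The function below is a hypothetical sketch of how one such client call could be issued with oslo.messaging; only the argument names come from the tests, the body is illustrative.

# Hypothetical oslo.messaging client call matching the cast asserted by
# test_share_server_migration_start; not copied from manila/share/rpcapi.py.
def share_server_migration_start(client, context, share_server, dest_host,
                                 writable, nondisruptive, preserve_snapshots,
                                 new_share_network_id):
    """client is an oslo_messaging.RPCClient bound to the share topic."""
    # Route the cast to the backend currently hosting the share server.
    cctxt = client.prepare(server=share_server['host'], version='1.21')
    cctxt.cast(context, 'share_server_migration_start',
               share_server_id=share_server['id'],
               dest_host=dest_host,
               writable=writable,
               nondisruptive=nondisruptive,
               preserve_snapshots=preserve_snapshots,
               new_share_network_id=new_share_network_id)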
@ -0,0 +1,26 @@
---
features:
  - |
    Added the ability to migrate share servers within and across backends in
    Manila. As designed in share migration, a two-phase approach is now
    available for share servers, with the addition of a new API to check the
    feasibility of a migration, called ``share-server-migration-check``.
    Manila can now start, complete, cancel and retrieve the progress of a
    share server migration. These operations are intended for administrators
    and work only when operating under ``driver_handles_share_servers=True``
    mode. When starting a share server migration, it is possible to choose
    which capabilities must be supported by the driver: remain ``writable``
    during the first phase, ``preserve_snapshots``, be ``nondisruptive`` and
    migrate to a different share network.
upgrade:
  - |
    The share server entity now contains two new fields: ``task_state`` and
    ``source_share_server_id``. The ``task_state`` field helps track the
    migration progress of a share server. The ``source_share_server_id``
    field holds the source share server identification until the migration
    is completed or cancelled.

    New statuses were added in order to track whether a share server, its
    shares or its snapshots are being migrated to a different location. A
    share server's shares remain in the ``server_migrating`` status while
    the migration is in progress. When the migration completes, the statuses
    are updated.