diff --git a/manila/api/v2/share_servers.py b/manila/api/v2/share_servers.py index b20f609423..0ee643896a 100644 --- a/manila/api/v2/share_servers.py +++ b/manila/api/v2/share_servers.py @@ -278,6 +278,9 @@ class ShareServerController(share_servers.ShareServerController, # NOTE(dviroel): invalid share server meaning that some internal # resource have a invalid state. raise exc.HTTPConflict(explanation=e.msg) + except exception.InvalidInput as e: + # User provided controversial parameters in the request + raise exc.HTTPBadRequest(explanation=e.msg) @wsgi.Controller.api_version('2.57', experimental=True) @wsgi.action("migration_complete") diff --git a/manila/db/api.py b/manila/db/api.py index c4b5d9a18f..d1e7b2e560 100644 --- a/manila/db/api.py +++ b/manila/db/api.py @@ -1078,6 +1078,11 @@ def share_server_backend_details_set(context, share_server_id, server_details): server_details) +def share_server_backend_details_delete(context, share_server_id): + """Delete backend details DB records for a share server.""" + return IMPL.share_server_backend_details_delete(context, share_server_id) + + def share_servers_update(context, share_server_ids, values): """Updates values of a bunch of share servers at once.""" return IMPL.share_servers_update( diff --git a/manila/share/api.py b/manila/share/api.py index f99131dfc2..d8325b39c6 100644 --- a/manila/share/api.py +++ b/manila/share/api.py @@ -2582,6 +2582,14 @@ class API(base.Base): payload = {'network': new_share_network_id} raise exception.InvalidShareServer(reason=msg % payload) + net_changes_identified = False + if new_share_network: + current_subnet = self.db.share_network_subnet_get( + context, share_server['share_network_subnet_id']) + for key in ['neutron_net_id', 'neutron_subnet_id']: + if current_subnet[key] != compatible_subnet[key]: + net_changes_identified = True + # NOTE(carloss): Refreshing the list of shares since something could've # changed from the initial list. 
shares = self.db.share_get_all_by_share_server( @@ -2645,23 +2653,50 @@ class API(base.Base): except exception.ShareBusyException as e: raise exception.InvalidShareServer(reason=e.msg) - return shares, types, service, new_share_network_id + return ( + shares, types, service, new_share_network_id, + net_changes_identified) def share_server_migration_check(self, context, share_server, dest_host, writable, nondisruptive, preserve_snapshots, new_share_network=None): """Migrates share server to a new host.""" - shares, types, service, new_share_network_id = ( + shares, types, service, new_share_network_id, net_params_changed = ( self._migration_initial_checks(context, share_server, dest_host, new_share_network)) + # If a nondisruptive migration was requested and different neutron net + # id and neutron subnet ids were identified + if net_params_changed and nondisruptive: + result = { + 'compatible': False, + 'writable': False, + 'nondisruptive': False, + 'preserve_snapshots': False, + 'migration_cancel': False, + 'migration_get_progress': False, + 'share_network_id': new_share_network_id + } + return result + # NOTE(dviroel): Service is up according to validations made on initial # checks result = self.share_rpcapi.share_server_migration_check( context, share_server['id'], dest_host, writable, nondisruptive, preserve_snapshots, new_share_network_id) + # NOTE(carloss): In case users haven't requested a nondisruptive + # migration and a network change was identified, we must get the + # driver's check result and if there is need to, manipulate it. + # The result is provided by the driver and based on the back end + # possibility to perform a nondisruptive migration or not. 
If + # a network change was provided, we know that the migration will be + # disruptive, so in order to do not confuse the user, we must present + # the share server migration as disruptive + if result.get('nondisruptive') and net_params_changed: + result['nondisruptive'] = False + return result def share_server_migration_start( @@ -2669,11 +2704,18 @@ class API(base.Base): preserve_snapshots, new_share_network=None): """Migrates share server to a new host.""" - shares, types, dest_service, new_share_network_id = ( + shares, types, service, new_share_network_id, net_params_changed = ( self._migration_initial_checks(context, share_server, dest_host, new_share_network)) + if nondisruptive and net_params_changed: + msg = _("Nondisruptive migration would only be feasible when the " + "current and new share networks carry the same " + "'neutron_net_id' and 'neutron_subnet_id', or when no " + "network changes are occurring.") + raise exception.InvalidInput(reason=msg) + # Updates the share server status to migration starting self.db.share_server_update( context, share_server['id'], diff --git a/manila/share/driver.py b/manila/share/driver.py index a27794ed96..0c67076708 100644 --- a/manila/share/driver.py +++ b/manila/share/driver.py @@ -2921,6 +2921,16 @@ class ShareDriver(object): migrated. :param snapshots: All snapshots in the source share server that should be migrated. + :return: Dict with migration information to be set in the destination + share server. 
+ + Example:: + + { + 'backend_details': { + 'migration_info_key': 'migration_info_value', + } + } """ raise NotImplementedError() @@ -3085,7 +3095,7 @@ class ShareDriver(object): 'nondisruptive': True, 'preserve_snapshots': True, 'migration_cancel': True, - 'migration_get_progress': False + 'migration_get_progress': False, } """ @@ -3095,7 +3105,7 @@ class ShareDriver(object): 'nondisruptive': False, 'preserve_snapshots': False, 'migration_cancel': False, - 'migration_get_progress': False + 'migration_get_progress': False, } def share_server_migration_complete(self, context, src_share_server, @@ -3189,6 +3199,11 @@ class ShareDriver(object): ], }, } + 'backend_details': + { + 'new_share_server_info_key': + 'new_share_server_info_value', + }, } """ diff --git a/manila/share/drivers/container/storage_helper.py b/manila/share/drivers/container/storage_helper.py index bc09aad0aa..40ebc8ad67 100644 --- a/manila/share/drivers/container/storage_helper.py +++ b/manila/share/drivers/container/storage_helper.py @@ -230,7 +230,7 @@ class LVMHelper(driver.ExecuteMixin): 'nondisruptive': None, 'preserve_snapshots': None, 'migration_cancel': None, - 'migration_get_progress': None + 'migration_get_progress': None, } dest_backend_name = share_utils.extract_host(dest_host, @@ -275,7 +275,7 @@ class LVMHelper(driver.ExecuteMixin): 'nondisruptive': False, 'preserve_snapshots': False, 'migration_cancel': True, - 'migration_get_progress': True + 'migration_get_progress': True, } def share_server_migration_start(self, context, src_share_server, diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py index 3cc524327f..610cfc7433 100644 --- a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py +++ b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py @@ -915,7 +915,7 @@ class NetAppCmodeMultiSVMFileStorageLibrary( 'preserve_snapshots': None, 'migration_cancel': 
None, 'migration_get_progress': None, - 'share_network_id': None + 'share_network_id': None, } # We need cluster creds, of course diff --git a/manila/share/manager.py b/manila/share/manager.py index 4df880017b..89ee64046e 100644 --- a/manila/share/manager.py +++ b/manila/share/manager.py @@ -830,6 +830,8 @@ class ShareManager(manager.SchedulerDependentManager): # request can use this metadata to take actions. server_metadata['migration_destination'] = True server_metadata['request_host'] = destination_host + server_metadata['source_share_server'] = ( + source_share_server) destination_share_server = ( self._create_share_server_in_backend( context, destination_share_server, @@ -3229,7 +3231,7 @@ class ShareManager(manager.SchedulerDependentManager): self.driver.network_api.unmanage_network_allocations( context, share_server_id) elif (self.driver.get_admin_network_allocations_number() > 0 - and self.driver.admin_network_api): + and self.driver.admin_network_api): # NOTE(ganso): This is here in case there are only admin # allocations. self.driver.admin_network_api.unmanage_network_allocations( @@ -3995,13 +3997,14 @@ class ShareManager(manager.SchedulerDependentManager): def _form_server_setup_info(self, context, share_server, share_network, share_network_subnet): + share_server_id = share_server['id'] # Network info is used by driver for setting up share server # and getting server info on share creation. network_allocations = self.db.network_allocations_get_for_share_server( - context, share_server['id'], label='user') + context, share_server_id, label='user') admin_network_allocations = ( self.db.network_allocations_get_for_share_server( - context, share_server['id'], label='admin')) + context, share_server_id, label='admin')) # NOTE(vponomaryov): following network_info fields are deprecated: # 'segmentation_id', 'cidr' and 'network_type'. # And they should be used from network allocations directly. 
@@ -5045,9 +5048,33 @@ class ShareManager(manager.SchedulerDependentManager): snapshot_instances, compatibility, dest_host, nondisruptive, writable, preserve_snapshots, resource_type='share server') + create_server_on_backend = not compatibility.get('nondisruptive') dest_share_server = self._provide_share_server_for_migration( context, source_share_server, new_share_network_id, - service['availability_zone_id'], dest_host) + service['availability_zone_id'], dest_host, + create_on_backend=create_server_on_backend) + + net_changes_identified = False + if not create_server_on_backend: + dest_share_server = self.db.share_server_get( + context, dest_share_server['id']) + current_subnet = dest_share_server['share_network_subnet'] + old_subnet = source_share_server['share_network_subnet'] + for key in ['neutron_net_id', 'neutron_subnet_id']: + if current_subnet.get(key) != old_subnet.get(key): + net_changes_identified = True + if net_changes_identified: + share_network_subnet = self.db.share_network_subnet_get( + context, dest_share_server['share_network_subnet_id']) + self.driver.allocate_network( + context, dest_share_server, new_share_network, + share_network_subnet) + self.driver.allocate_admin_network( + context, dest_share_server) + # Refresh the share server so it will have the network + # allocations when sent to the driver + dest_share_server = self.db.share_server_get( + context, dest_share_server['id']) self.db.share_server_update( context, dest_share_server['id'], @@ -5074,10 +5101,16 @@ class ShareManager(manager.SchedulerDependentManager): {'task_state': ( constants.TASK_STATE_MIGRATION_DRIVER_STARTING)}) - self.driver.share_server_migration_start( + server_info = self.driver.share_server_migration_start( context, source_share_server, dest_share_server, share_instances, snapshot_instances) + backend_details = ( + server_info.get('backend_details') if server_info else None) + if backend_details: + self.db.share_server_backend_details_set( + context, 
dest_share_server['id'], backend_details) + self.db.share_server_update( context, source_share_server['id'], {'task_state': ( @@ -5102,8 +5135,14 @@ class ShareManager(manager.SchedulerDependentManager): context, dest_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR, 'status': constants.STATUS_ERROR}) - self._check_delete_share_server(context, - share_server=dest_share_server) + if not create_server_on_backend: + if net_changes_identified: + self.driver.deallocate_network( + context, dest_share_server['id']) + self.db.share_server_delete( + context, dest_share_server['id']) + else: + self.delete_share_server(context, dest_share_server) msg = _("Driver-assisted migration of share server %s " "failed.") % source_share_server['id'] LOG.exception(msg) @@ -5380,11 +5419,18 @@ class ShareManager(manager.SchedulerDependentManager): 'has failed in migration-complete phase.') % msg_args raise exception.ShareServerMigrationFailed(reason=msg) + server_update_args = { + 'task_state': constants.TASK_STATE_MIGRATION_SUCCESS, + 'status': constants.STATUS_ACTIVE + } + + # Migration mechanism reused the share server + if not dest_server['identifier']: + server_update_args['identifier'] = src_server['identifier'] + # Update share server status for success scenario self.db.share_server_update( - context, dest_share_server_id, - {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS, - 'status': constants.STATUS_ACTIVE}) + context, dest_share_server_id, server_update_args) self._update_resource_status( context, constants.STATUS_AVAILABLE, share_instance_ids=share_instance_ids, @@ -5412,13 +5458,54 @@ class ShareManager(manager.SchedulerDependentManager): dest_sn = self.db.share_network_get(context, dest_sn_id) dest_sns = self.db.share_network_subnet_get(context, dest_sns_id) + migration_reused_network_allocations = (len( + self.db.network_allocations_get_for_share_server( + context, dest_share_server['id'])) == 0) + + server_to_get_allocations = ( + 
dest_share_server + if not migration_reused_network_allocations + else source_share_server) + new_network_allocations = self._form_server_setup_info( - context, dest_share_server, dest_sn, dest_sns) + context, server_to_get_allocations, dest_sn, dest_sns) model_update = self.driver.share_server_migration_complete( context, source_share_server, dest_share_server, share_instances, snapshot_instances, new_network_allocations) + if not migration_reused_network_allocations: + all_allocations = [ + new_network_allocations['network_allocations'], + new_network_allocations['admin_network_allocations'] + ] + for allocations in all_allocations: + for allocation in allocations: + allocation_id = allocation['id'] + values = { + 'share_server_id': dest_share_server['id'] + } + self.db.network_allocation_update( + context, allocation_id, values) + + # If share server doesn't have an identifier, we didn't ask the driver + # to create a brand new server - this was a nondisruptive migration + share_server_was_reused = not dest_share_server['identifier'] + if share_server_was_reused: + driver_backend_details = model_update.get( + 'server_backend_details') + # Clean up the previous backend details set for migration details + if driver_backend_details: + self.db.share_server_backend_details_delete( + context, dest_share_server['id']) + backend_details = ( + driver_backend_details + or source_share_server.get("backend_details")) + if backend_details: + for k, v in backend_details.items(): + self.db.share_server_backend_details_set( + context, dest_share_server['id'], {k: v}) + host_value = share_utils.extract_host(dest_share_server['host']) service = self.db.service_get_by_args( context, host_value, 'manila-share') @@ -5482,8 +5569,14 @@ class ShareManager(manager.SchedulerDependentManager): {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS, 'status': constants.STATUS_INACTIVE}) - self._check_delete_share_server( - context, share_server=source_share_server, remote_host=True) + if 
share_server_was_reused: + self.driver.deallocate_network(context, source_share_server['id']) + self.db.share_server_delete(context, source_share_server['id']) + else: + source_share_server = self._get_share_server_dict( + context, source_share_server) + rpcapi = share_rpcapi.ShareAPI() + rpcapi.delete_share_server(context, source_share_server) @add_hooks @utils.require_driver_initialized diff --git a/manila/tests/api/v2/test_share_servers.py b/manila/tests/api/v2/test_share_servers.py index 89a90c6ce6..fcd7ed2161 100644 --- a/manila/tests/api/v2/test_share_servers.py +++ b/manila/tests/api/v2/test_share_servers.py @@ -532,16 +532,16 @@ class ShareServerControllerTest(test.TestCase): context, network_subnet['share_network_id']) is_active_mock.assert_called_once_with(share_network) - def _get_server_migration_request(self, server_id): + def _get_server_migration_request(self, server_id, version='2.57'): req = fakes.HTTPRequest.blank( '/share-servers/%s/action' % server_id, - use_admin_context=True, version='2.57') + use_admin_context=True, version=version) req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True return req - def test_share_server_migration_start(self): + def test__share_server_migration_start(self): server = db_utils.create_share_server(id='fake_server_id', status=constants.STATUS_ACTIVE) share_network = db_utils.create_share_network() @@ -566,7 +566,8 @@ class ShareServerControllerTest(test.TestCase): } } - self.controller.share_server_migration_start(req, server['id'], body) + self.controller.share_server_migration_start( + req, server['id'], body) db_api.share_server_get.assert_called_once_with( context, server['id']) @@ -581,10 +582,12 @@ class ShareServerControllerTest(test.TestCase): @ddt.data({'api_exception': exception.ServiceIsDown(service='fake_srv'), 'expected_exception': webob.exc.HTTPBadRequest}, {'api_exception': exception.InvalidShareServer(reason=""), - 'expected_exception': 
webob.exc.HTTPConflict}) + 'expected_exception': webob.exc.HTTPConflict}, + {'api_exception': exception.InvalidInput(), + 'expected_exception': webob.exc.HTTPBadRequest}) @ddt.unpack - def test_share_server_migration_start_conflict(self, api_exception, - expected_exception): + def test__share_server_migration_start_conflict(self, api_exception, + expected_exception): share_network = db_utils.create_share_network() share_network_subnet = db_utils.create_share_network_subnet( share_network_id=share_network['id']) @@ -598,7 +601,7 @@ class ShareServerControllerTest(test.TestCase): 'host': 'fake_host', 'preserve_snapshots': True, 'writable': True, - 'nondisruptive': True, + 'nondisruptive': True } } self.mock_object(share_api.API, 'share_server_migration_start', @@ -629,7 +632,7 @@ class ShareServerControllerTest(test.TestCase): new_share_network=None) @ddt.data('host', 'body') - def test_share_server_migration_start_missing_mandatory(self, param): + def test__share_server_migration_start_missing_mandatory(self, param): server = db_utils.create_share_server( id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) @@ -641,7 +644,7 @@ class ShareServerControllerTest(test.TestCase): 'preserve_metadata': True, 'preserve_snapshots': True, 'writable': True, - 'nondisruptive': True, + 'nondisruptive': True } } @@ -650,20 +653,20 @@ class ShareServerControllerTest(test.TestCase): else: body['migration_start'].pop(param) - method = 'share_server_migration_start' - - self.mock_object(share_api.API, method) + self.mock_object(share_api.API, 'share_server_migration_start') self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=server)) - self.assertRaises(webob.exc.HTTPBadRequest, - getattr(self.controller, method), - req, server['id'], body) + self.assertRaises( + webob.exc.HTTPBadRequest, + getattr(self.controller, 'share_server_migration_start'), + req, server['id'], body) + 
db_api.share_server_get.assert_called_once_with(context, server['id']) @ddt.data('nondisruptive', 'writable', 'preserve_snapshots') - def test_share_server_migration_start_non_boolean(self, param): + def test__share_server_migration_start_non_boolean(self, param): server = db_utils.create_share_server( id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) @@ -674,25 +677,25 @@ class ShareServerControllerTest(test.TestCase): 'host': 'fake_host', 'preserve_snapshots': True, 'writable': True, - 'nondisruptive': True, + 'nondisruptive': True } } body['migration_start'][param] = None - method = 'share_server_migration_start' - - self.mock_object(share_api.API, method) + self.mock_object(share_api.API, 'share_server_migration_start') self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=server)) - self.assertRaises(webob.exc.HTTPBadRequest, - getattr(self.controller, method), - req, server['id'], body) + self.assertRaises( + webob.exc.HTTPBadRequest, + getattr(self.controller, 'share_server_migration_start'), + req, server['id'], body) + db_api.share_server_get.assert_called_once_with(context, server['id']) - def test_share_server_migration_start_share_server_not_found(self): + def test__share_server_migration_start_share_server_not_found(self): fake_id = 'fake_server_id' req = self._get_server_migration_request(fake_id) context = req.environ['manila.context'] @@ -709,7 +712,7 @@ class ShareServerControllerTest(test.TestCase): db_api.share_server_get.assert_called_once_with(context, fake_id) - def test_share_server_migration_start_new_share_network_not_found(self): + def test__share_server_migration_start_new_share_network_not_found(self): server = db_utils.create_share_server( id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) @@ -737,7 +740,7 @@ class ShareServerControllerTest(test.TestCase): 
db_api.share_server_get.assert_called_once_with(context, server['id']) - def test_share_server_migration_start_host_with_pool(self): + def test__share_server_migration_start_host_with_pool(self): server = db_utils.create_share_server(id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) diff --git a/manila/tests/share/drivers/container/test_storage_helper.py b/manila/tests/share/drivers/container/test_storage_helper.py index f854661521..c1c62ea4a3 100644 --- a/manila/tests/share/drivers/container/test_storage_helper.py +++ b/manila/tests/share/drivers/container/test_storage_helper.py @@ -270,7 +270,7 @@ class LVMHelperTestCase(test.TestCase): 'nondisruptive': None, 'preserve_snapshots': None, 'migration_cancel': None, - 'migration_get_progress': None + 'migration_get_progress': None, } mock_error_log = self.mock_object(storage_helper.LOG, 'error') @@ -300,7 +300,7 @@ class LVMHelperTestCase(test.TestCase): 'nondisruptive': False, 'preserve_snapshots': False, 'migration_cancel': True, - 'migration_get_progress': True + 'migration_get_progress': True, } source_server = {'id': 'fake_id', 'host': source_host} diff --git a/manila/tests/share/drivers/dummy.py b/manila/tests/share/drivers/dummy.py index c01447854f..e2afb616c9 100644 --- a/manila/tests/share/drivers/dummy.py +++ b/manila/tests/share/drivers/dummy.py @@ -716,7 +716,7 @@ class DummyDriver(driver.ShareDriver): 'nondisruptive': False, 'share_network_id': new_share_network['id'], 'migration_cancel': compatible, - 'migration_get_progress': compatible + 'migration_get_progress': compatible, } @slow_me_down diff --git a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py index 0078712fcd..4276ae07a5 100644 --- a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py +++ 
b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py @@ -1956,7 +1956,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase): 'preserve_snapshots': True, 'migration_cancel': True, 'migration_get_progress': False, - 'share_network_id': fake.SHARE_NETWORK['id'] + 'share_network_id': fake.SHARE_NETWORK['id'], } self._configure_mocks_share_server_migration_check_compatibility() diff --git a/manila/tests/share/drivers/netapp/dataontap/fakes.py b/manila/tests/share/drivers/netapp/dataontap/fakes.py index 885c83d477..3578c2a756 100644 --- a/manila/tests/share/drivers/netapp/dataontap/fakes.py +++ b/manila/tests/share/drivers/netapp/dataontap/fakes.py @@ -1552,7 +1552,7 @@ SERVER_MIGRATION_CHECK_NOT_COMPATIBLE = { 'preserve_snapshots': None, 'migration_cancel': None, 'migration_get_progress': None, - 'share_network_id': None + 'share_network_id': None, } CIFS_SECURITY_SERVICE = { diff --git a/manila/tests/share/test_api.py b/manila/tests/share/test_api.py index f993797b1a..a0765554a4 100644 --- a/manila/tests/share/test_api.py +++ b/manila/tests/share/test_api.py @@ -4594,6 +4594,15 @@ class ShareAPITestCase(test.TestCase): } fake_share_network = ( db_utils.create_share_network() if create_share_network else None) + current_subnet_data = { + 'id': 'current_subnet_id', + 'neutron_net_id': 'current_neutron_net_id', + 'neutron_subnet_id': 'current_neutron_subnet_id', + } + fake_network_subnet = db_utils.create_share_network_subnet( + **current_subnet_data) + expected_network_change = create_share_network is True + fake_share_network_id = ( fake_share_network['id'] if create_share_network else fake_share['share_network_id']) @@ -4618,8 +4627,11 @@ class ShareAPITestCase(test.TestCase): mock_get_subnet = self.mock_object( db_api, 'share_network_subnet_get_by_availability_zone_id', mock.Mock(return_value=fake_subnet)) + mock_get_current_subnet = self.mock_object( + db_api, 'share_network_subnet_get', + mock.Mock(return_value=fake_network_subnet)) 
- exp_shares, exp_types, exp_service, exp_share_network_id = ( + exp_shares, exp_types, exp_service, exp_network_id, net_change = ( self.api._migration_initial_checks( self.context, fake_share_server, fake_host, fake_share_network)) @@ -4627,7 +4639,8 @@ class ShareAPITestCase(test.TestCase): self.assertEqual(exp_shares, [fake_share]) self.assertEqual(exp_types, [share_type]) self.assertEqual(exp_service, service) - self.assertEqual(exp_share_network_id, fake_share_network_id) + self.assertEqual(exp_network_id, fake_share_network_id) + self.assertIs(expected_network_change, net_change) mock_shares_get_all.assert_has_calls([ mock.call(self.context, fake_share_server['id']), mock.call(self.context, fake_share_server['id'])]) @@ -4640,6 +4653,9 @@ class ShareAPITestCase(test.TestCase): mock_az_get.assert_called_once_with( self.context, service['availability_zone']['name'] ) + if create_share_network: + mock_get_current_subnet.assert_called_once_with( + self.context, fake_share_server['share_network_subnet_id']) def test_share_server_migration_get_destination(self): fake_source_server_id = 'fake_source_id' @@ -5208,6 +5224,74 @@ class ShareAPITestCase(test.TestCase): self.context, service['availability_zone']['name'] ) + def test_server_migration_check_nondisruptive_and_network_change(self): + fake_shares = [db_utils.create_share() for i in range(2)] + fake_types = [{'id': 'fake_type_id'}] + fake_share_server = db_utils.create_share_server() + dest_host = fake_share_server['host'] + service = { + 'availability_zone_id': 'fake_az_id', + 'availability_zone': {'name': 'fake_az_name'} + } + fake_share_network = db_utils.create_share_network() + network_has_changed = True + writable = preserve_snapshots = False + nondisruptive = True + expected_result = { + 'compatible': False, + 'writable': writable, + 'nondisruptive': False, + 'preserve_snapshots': preserve_snapshots, + 'share_network_id': fake_share_network['id'], + 'migration_cancel': False, + 'migration_get_progress': 
False + } + + mock_initial_checks = self.mock_object( + self.api, '_migration_initial_checks', + mock.Mock( + return_value=[fake_shares, fake_types, service, + fake_share_network['id'], network_has_changed])) + + check_result = self.api.share_server_migration_check( + self.context, fake_share_server, dest_host, writable, + nondisruptive, preserve_snapshots, fake_share_network + ) + + self.assertEqual(expected_result, check_result) + mock_initial_checks.assert_called_once_with( + self.context, fake_share_server, dest_host, fake_share_network) + + def test_server_migration_start_nondisruptive_and_network_change(self): + fake_shares = [db_utils.create_share() for i in range(2)] + fake_types = [{'id': 'fake_type_id'}] + fake_share_server = db_utils.create_share_server() + dest_host = fake_share_server['host'] + service = { + 'availability_zone_id': 'fake_az_id', + 'availability_zone': {'name': 'fake_az_name'} + } + fake_share_network = db_utils.create_share_network() + network_has_changed = True + writable = preserve_snapshots = False + nondisruptive = True + + mock_initial_checks = self.mock_object( + self.api, '_migration_initial_checks', + mock.Mock( + return_value=[fake_shares, fake_types, service, + fake_share_network['id'], network_has_changed])) + + self.assertRaises( + exception.InvalidInput, + self.api.share_server_migration_start, + self.context, fake_share_server, dest_host, writable, + nondisruptive, preserve_snapshots, fake_share_network + ) + + mock_initial_checks.assert_called_once_with( + self.context, fake_share_server, dest_host, fake_share_network) + def test_share_server_migration_check(self): type_data = { 'extra_specs': { @@ -5243,7 +5327,7 @@ class ShareAPITestCase(test.TestCase): mock_initial_checks = self.mock_object( self.api, '_migration_initial_checks', mock.Mock(return_value=[fake_shares, fake_types, service, - fake_share_network['id']])) + fake_share_network['id'], False])) # NOTE(carloss): Returning an "empty" dictionary should be enough 
for # this test case. The unit test to check the values being returned to # the user should be placed in the share manager, where the dict is @@ -5308,7 +5392,7 @@ class ShareAPITestCase(test.TestCase): mock_initial_checks = self.mock_object( self.api, '_migration_initial_checks', mock.Mock(return_value=[fake_shares, fake_types, service, - fake_share_network['id']])) + fake_share_network['id'], False])) mock_migration_start = self.mock_object( self.share_rpcapi, 'share_server_migration_start') mock_server_update = self.mock_object(db_api, 'share_server_update') diff --git a/manila/tests/share/test_driver.py b/manila/tests/share/test_driver.py index d9dcf35c28..88a7a71d55 100644 --- a/manila/tests/share/test_driver.py +++ b/manila/tests/share/test_driver.py @@ -623,7 +623,7 @@ class ShareDriverTestCase(test.TestCase): 'nondisruptive': False, 'preserve_snapshots': False, 'migration_cancel': False, - 'migration_get_progress': False + 'migration_get_progress': False, } driver_compatibility = ( diff --git a/manila/tests/share/test_manager.py b/manila/tests/share/test_manager.py index 135ef0b54a..54c8abfa1a 100644 --- a/manila/tests/share/test_manager.py +++ b/manila/tests/share/test_manager.py @@ -2900,7 +2900,8 @@ class ShareManagerTestCase(test.TestCase): } fake_metadata = { 'migration_destination': True, - 'request_host': fake_data['fake_dest_host'] + 'request_host': fake_data['fake_dest_host'], + 'source_share_server': fake_data['source_share_server'] } mock_subnet_get = self.mock_object( @@ -8167,7 +8168,8 @@ class ShareManagerTestCase(test.TestCase): def _setup_server_migration_start_mocks( self, fake_share_instances, fake_snap_instances, fake_old_network, fake_new_network, fake_service, fake_request_spec, - fake_driver_result, fake_new_share_server): + fake_driver_result, fake_new_share_server, server_info, + network_subnet): self.mock_object(db, 'share_instances_get_all_by_share_server', mock.Mock(return_value=fake_share_instances)) self.mock_object(db, 
'share_snapshot_instance_get_all_with_filters', @@ -8191,14 +8193,50 @@ class ShareManagerTestCase(test.TestCase): mock.Mock(return_value=fake_new_share_server)) self.mock_object(self.share_manager, '_cast_access_rules_to_readonly_for_server') + self.mock_object(db, 'share_network_subnet_get', + mock.Mock(return_value=network_subnet)) + self.mock_object(db, 'share_server_get', + mock.Mock(return_value=fake_new_share_server)) + self.mock_object(self.share_manager.driver, 'allocate_network') + self.mock_object(self.share_manager.driver, 'allocate_admin_network') + self.mock_object(self.share_manager.driver, + 'deallocate_network') + self.mock_object(db, 'share_server_delete') self.mock_object(db, 'share_server_update') self.mock_object(self.share_manager.driver, - 'share_server_migration_start') + 'share_server_migration_start', + mock.Mock(return_value=server_info)) + self.mock_object(db, 'share_server_backend_details_set') + self.mock_object(self.share_manager, 'delete_share_server') + + @ddt.data((True, True), (False, True)) + @ddt.unpack + def test__share_server_migration_start_driver(self, writable, + nondisruptive): + old_subnet_id = 'fake_id' + new_subnet_kwargs = {} + create_new_allocations = False + if not nondisruptive: + new_subnet_kwargs.update({ + 'neutron_net_id': 'fake_nn_id', + 'neutron_subnet_id': 'fake_sn_id' + }) + create_new_allocations = True + + network_subnet = db_utils.create_share_network_subnet(id=old_subnet_id) + new_network_subnet = db_utils.create_share_network_subnet( + **new_subnet_kwargs) + fake_old_share_server = { + 'id': 'fake_server_id', + 'share_network_subnet': network_subnet, + 'host': 'host@backend' + } + fake_new_share_server = { + 'id': 'fake_server_id_2', + 'share_network_subnet': new_network_subnet, + 'host': 'host@backend' + } - @ddt.data(True, False) - def test__share_server_migration_start_driver(self, writable): - fake_old_share_server = db_utils.create_share_server() - fake_new_share_server = 
db_utils.create_share_server() fake_old_network = db_utils.create_share_network() fake_new_network = db_utils.create_share_network() fake_share_instances = [ @@ -8212,7 +8250,6 @@ class ShareManagerTestCase(test.TestCase): 'availability_zone': {'name': 'fake_az1'}} fake_request_spec = {} fake_dest_host = 'fakehost@fakebackend' - nondisruptive = False preserve_snapshots = True fake_driver_result = { 'compatible': True, @@ -8223,16 +8260,21 @@ class ShareManagerTestCase(test.TestCase): 'migration_cancel': False, 'migration_get_progress': False } + server_info = { + 'fake_server_info_key': 'fake_server_info_value', + 'backend_details': {'fake': 'fake'} + } + create_on_backend = not nondisruptive self._setup_server_migration_start_mocks( fake_share_instances, fake_snap_instances, fake_old_network, fake_new_network, fake_service, fake_request_spec, - fake_driver_result, fake_new_share_server) + fake_driver_result, fake_new_share_server, server_info, + network_subnet) result = self.share_manager._share_server_migration_start_driver( self.context, fake_old_share_server, fake_dest_host, writable, - nondisruptive, preserve_snapshots, fake_new_network['id'] - ) + nondisruptive, preserve_snapshots, fake_new_network['id']) self.assertTrue(result) db.share_instances_get_all_by_share_server.assert_called_once_with( @@ -8263,7 +8305,8 @@ class ShareManagerTestCase(test.TestCase): (self.share_manager._provide_share_server_for_migration. 
assert_called_once_with( self.context, fake_old_share_server, fake_new_network['id'], - fake_service['availability_zone_id'], fake_dest_host)) + fake_service['availability_zone_id'], fake_dest_host, + create_on_backend=create_on_backend)) db.share_server_update.assert_has_calls( self._get_share_server_start_update_calls( fake_old_share_server, fake_new_share_server)) @@ -8271,6 +8314,17 @@ assert_called_once_with( self.context, fake_old_share_server, fake_new_share_server, fake_share_instances, fake_snap_instances)) + if not create_on_backend: + db.share_server_get.assert_called_once_with( + self.context, fake_new_share_server['id']) + if create_new_allocations: + db.share_network_subnet_get.assert_called_once_with( + self.context, fake_new_share_server['id']) + (self.share_manager.driver.allocate_network. + assert_called_once_with(self.context, fake_new_share_server, + fake_new_network, network_subnet)) + (self.share_manager.driver.allocate_admin_network. + assert_called_once_with(self.context, fake_new_share_server)) if not writable: (self.share_manager._cast_access_rules_to_readonly_for_server. assert_called_once_with( @@ -8279,6 +8333,10 @@ else: (self.share_manager._cast_access_rules_to_readonly_for_server.
assert_not_called()) + if server_info: + db.share_server_backend_details_set.assert_called_once_with( + self.context, fake_new_share_server['id'], + server_info.get('backend_details')) def test__share_server_migration_start_driver_exception(self): fake_old_share_server = db_utils.create_share_server() @@ -8309,11 +8367,17 @@ class ShareManagerTestCase(test.TestCase): 'migration_cancel': False, 'migration_get_progress': False } + server_info = { + 'fake_server_info_key': 'fake_server_info_value', + 'backend_details': {'fake': 'fake'} + } + network_subnet = db_utils.create_share_network_subnet() self._setup_server_migration_start_mocks( fake_share_instances, fake_snap_instances, fake_old_network, fake_new_network, fake_service, fake_request_spec, - fake_driver_result, fake_new_share_server) + fake_driver_result, fake_new_share_server, server_info, + network_subnet) mock__reset_read_only = self.mock_object( self.share_manager, '_reset_read_only_access_rules_for_server') @@ -8361,7 +8425,8 @@ class ShareManagerTestCase(test.TestCase): (self.share_manager._provide_share_server_for_migration. assert_called_once_with( self.context, fake_old_share_server, fake_new_network['id'], - fake_service['availability_zone_id'], fake_dest_host)) + fake_service['availability_zone_id'], fake_dest_host, + create_on_backend=True)) db.share_server_update.assert_has_calls( self._get_share_server_start_update_calls( fake_old_share_server, fake_new_share_server, @@ -8383,6 +8448,8 @@ class ShareManagerTestCase(test.TestCase): else: (self.share_manager._cast_access_rules_to_readonly_for_server. 
assert_not_called()) + self.share_manager.delete_share_server.assert_called_once_with( + self.context, fake_new_share_server) @ddt.data(None, exception.ShareServerMigrationFailed) def test_share_server_migration_check(self, check_action): @@ -8747,11 +8814,16 @@ class ShareManagerTestCase(test.TestCase): def _setup_server_migration_complete_mocks( self, fake_source_share_server, fake_dest_share_server, - fake_share_instances, fake_snapshot_instances): + fake_share_instances, fake_snapshot_instances, + additional_server_get_side_effect=None): + server_get_side_effects = [fake_dest_share_server, + fake_source_share_server] + if additional_server_get_side_effect: + server_get_side_effects.append(additional_server_get_side_effect) + self.mock_object( db, 'share_server_get', - mock.Mock(side_effect=[fake_dest_share_server, - fake_source_share_server])) + mock.Mock(side_effect=server_get_side_effects)) self.mock_object( db, 'share_instances_get_all_by_share_server', mock.Mock(return_value=fake_share_instances)) @@ -8762,7 +8834,9 @@ class ShareManagerTestCase(test.TestCase): self.share_manager, '_update_resource_status') self.mock_object(db, 'share_server_update') - def test_share_server_migration_complete_exception(self): + @ddt.data(True, False) + def test_share_server_migration_complete_exception( + self, server_already_dropped): fake_source_share_server = db_utils.create_share_server() fake_dest_share_server = db_utils.create_share_server() fake_share_instances = [db_utils.create_share()['instance']] @@ -8770,11 +8844,26 @@ class ShareManagerTestCase(test.TestCase): instance['id'] for instance in fake_share_instances] fake_snapshot_instances = [] fake_snapshot_instance_ids = [] + server_get_additional_se = ( + exception.ShareServerNotFound + if server_already_dropped else fake_dest_share_server) + server_update_calls = [ + mock.call(self.context, fake_source_share_server['id'], + {'task_state': constants.TASK_STATE_MIGRATION_ERROR, + 'status': 
constants.STATUS_ERROR}), + ] + if not server_already_dropped: + server_update_calls.append( + mock.call( + self.context, fake_dest_share_server['id'], + {'task_state': constants.TASK_STATE_MIGRATION_ERROR, + 'status': constants.STATUS_ERROR}) + ) self._setup_server_migration_complete_mocks( fake_source_share_server, fake_dest_share_server, - fake_share_instances, fake_snapshot_instances - ) + fake_share_instances, fake_snapshot_instances, + server_get_additional_se) mock__server_migration_complete = self.mock_object( self.share_manager, '_server_migration_complete_driver', mock.Mock(side_effect=Exception)) @@ -8801,24 +8890,30 @@ class ShareManagerTestCase(test.TestCase): self.context, constants.STATUS_ERROR, share_instance_ids=fake_share_instance_ids, snapshot_instance_ids=fake_snapshot_instance_ids) - db.share_server_update.assert_has_calls([ - mock.call(self.context, fake_source_share_server['id'], - {'task_state': constants.TASK_STATE_MIGRATION_ERROR, - 'status': constants.STATUS_ERROR}), - mock.call( - self.context, fake_dest_share_server['id'], - {'task_state': constants.TASK_STATE_MIGRATION_ERROR, - 'status': constants.STATUS_ERROR})]) + db.share_server_update.assert_has_calls(server_update_calls) - def test_share_server_migration_complete(self): - fake_source_share_server = db_utils.create_share_server() - fake_dest_share_server = db_utils.create_share_server() + @ddt.data(('fake_src_identifier', 'fake_dest_identifier'), + ('fake_src_identifier', None)) + @ddt.unpack + def test_share_server_migration_complete( + self, src_identifier, dest_identifier): + fake_source_share_server = db_utils.create_share_server( + identifier=src_identifier) + fake_dest_share_server = db_utils.create_share_server( + identifier=dest_identifier) fake_share_instances = [db_utils.create_share()['instance']] fake_share_instance_ids = [ instance['id'] for instance in fake_share_instances] fake_snapshot_instances = [] fake_snapshot_instance_ids = [] - + expected_identifier = ( + 
dest_identifier if dest_identifier else src_identifier) + expected_server_update = { + 'task_state': constants.TASK_STATE_MIGRATION_SUCCESS, + 'status': constants.STATUS_ACTIVE, + } + if not dest_identifier: + expected_server_update['identifier'] = expected_identifier self._setup_server_migration_complete_mocks( fake_source_share_server, fake_dest_share_server, fake_share_instances, fake_snapshot_instances @@ -8847,18 +8942,38 @@ class ShareManagerTestCase(test.TestCase): snapshot_instance_ids=fake_snapshot_instance_ids) db.share_server_update.assert_called_once_with( self.context, fake_dest_share_server['id'], - {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS, - 'status': constants.STATUS_ACTIVE}) + expected_server_update) @ddt.data( - {'unmanage_source_server': False, - 'snapshot_updates': {}, - 'share_updates': {}}, - {'unmanage_source_server': True, - 'snapshot_updates': {}, - 'share_updates': {}}, + {'model_update': { + 'unmanage_source_server': False, + 'snapshot_updates': {}, + 'share_updates': {}}, + 'need_network_allocation': False, + 'can_reuse_server': False}, + {'model_update': { + 'unmanage_source_server': True, + 'snapshot_updates': {}, + 'share_updates': {}}, + 'need_network_allocation': False, + 'can_reuse_server': True}, + {'model_update': { + 'unmanage_source_server': False, + 'snapshot_updates': {}, + 'share_updates': {}}, + 'need_network_allocation': True, + 'can_reuse_server': False}, + {'model_update': { + 'unmanage_source_server': True, + 'snapshot_updates': {}, + 'share_updates': {}}, + 'need_network_allocation': True, + 'can_reuse_server': True} ) - def test__server_migration_complete_driver(self, model_update): + @ddt.unpack + def test__server_migration_complete_driver(self, model_update, + need_network_allocation, + can_reuse_server): fake_share_network = db_utils.create_share_network() fake_share_network_subnet = db_utils.create_share_network_subnet( share_network_id=fake_share_network['id']) @@ -8872,7 +8987,10 @@ class 
ShareManagerTestCase(test.TestCase): fake_share_instances = [fake_share['instance']] fake_snapshot_instances = [fake_snapshot['instance']] fake_share_instance_id = fake_share['instance']['id'] - fake_allocation_data = {} + fake_allocation_data = { + 'network_allocations': [{'id': 'fake_id'}], + 'admin_network_allocations': [{'id': 'fake_admin_id'}], + } model_update['share_updates'][fake_share['instance']['id']] = { 'export_locations': { "path": "10.10.10.31:/fake_mount_point", @@ -8896,19 +9014,41 @@ class ShareManagerTestCase(test.TestCase): 'share_network_id': fake_share_network['id'], 'availability_zone_id': fake_service['availability_zone_id'], } + backend_details = fake_source_share_server.get("backend_details") + mock_backend_details_set_calls = [] + if backend_details: + for k, v in backend_details.items(): + mock_backend_details_set_calls.append( + mock.call( + self.context, fake_dest_share_server['id'], + {k: v}) + ) + + dest_network_allocations = [] + if need_network_allocation: + dest_network_allocations.append({'id': 'fake_allocation'}) mock_server_update = self.mock_object(db, 'share_server_update') mock_network_get = self.mock_object( - db, 'share_network_get', mock.Mock(return_vale=fake_share_network)) + db, 'share_network_get', + mock.Mock(return_value=fake_share_network)) + mock_allocations_get = self.mock_object( + db, 'network_allocations_get_for_share_server', + mock.Mock(return_value=dest_network_allocations) + ) mock_subnet_get = self.mock_object( db, 'share_network_subnet_get', mock.Mock(return_value=fake_share_network_subnet)) - self.mock_object( + mock_form_server_setup_info = self.mock_object( self.share_manager, '_form_server_setup_info', mock.Mock(return_value=fake_allocation_data)) mock_server_migration_complete = self.mock_object( self.share_manager.driver, 'share_server_migration_complete', mock.Mock(return_value=model_update)) + mock_network_allocation_update = self.mock_object( + db, 'network_allocation_update') + 
mock_share_server_backend_details_set = self.mock_object( + db, 'share_server_backend_details_set') mock_service_get_by_args = self.mock_object( db, 'service_get_by_args', mock.Mock(return_value=fake_service)) mock_instance_update = self.mock_object(db, 'share_instance_update') @@ -8919,8 +9059,9 @@ class ShareManagerTestCase(test.TestCase): self.share_manager, '_reset_read_only_access_rules_for_server') mock_unmanage_server = self.mock_object( rpcapi.ShareAPI, 'unmanage_share_server') - mock_check_delete_server = self.mock_object( - self.share_manager, '_check_delete_share_server') + mock_delete_server = self.mock_object(db, 'share_server_delete') + mock_deallocate_network = self.mock_object( + self.share_manager.driver, 'deallocate_network') self.share_manager._server_migration_complete_driver( self.context, fake_source_share_server, fake_share_instances, @@ -8941,6 +9082,29 @@ class ShareManagerTestCase(test.TestCase): self.context, fake_share_network['id']) mock_subnet_get.assert_called_once_with( self.context, fake_share_network_subnet['id']) + mock_allocations_get.assert_called_once_with( + self.context, fake_dest_share_server['id']) + + if not need_network_allocation: + mock_form_server_setup_info.assert_called_once_with( + self.context, fake_source_share_server, fake_share_network, + fake_share_network_subnet) + elif need_network_allocation: + mock_share_server_backend_details_set.assert_has_calls( + mock_backend_details_set_calls) + mock_form_server_setup_info.assert_called_once_with( + self.context, fake_dest_share_server, fake_share_network, + fake_share_network_subnet) + mock_network_allocation_update.assert_has_calls( + [mock.call( + self.context, + fake_allocation_data['network_allocations'][0]['id'], + {'share_server_id': fake_dest_share_server['id']}), + mock.call( + self.context, + fake_allocation_data['admin_network_allocations'][0]['id'], + {'share_server_id': fake_dest_share_server['id']})]) + 
mock_server_migration_complete.assert_called_once_with( self.context, fake_source_share_server, fake_dest_share_server, fake_share_instances, fake_snapshot_instances, fake_allocation_data @@ -8963,10 +9127,10 @@ class ShareManagerTestCase(test.TestCase): mock_unmanage_server.assert_called_once_with( self.context, fake_source_share_server) else: - mock_check_delete_server.assert_called_once_with( - self.context, share_server=fake_source_share_server, - remote_host=True - ) + mock_deallocate_network.assert_called_once_with( + self.context, fake_source_share_server['id']) + mock_delete_server.assert_called_once_with( + self.context, fake_source_share_server['id']) @ddt.data(constants.TASK_STATE_MIGRATION_SUCCESS, constants.TASK_STATE_MIGRATION_IN_PROGRESS) diff --git a/releasenotes/notes/add-share-server-migration-enhancements-bbbc98a7fb419700.yaml b/releasenotes/notes/add-share-server-migration-enhancements-bbbc98a7fb419700.yaml new file mode 100644 index 0000000000..30a83a2a4d --- /dev/null +++ b/releasenotes/notes/add-share-server-migration-enhancements-bbbc98a7fb419700.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Added share server migration enhancements. Share back ends that support + non-disruptive migration are able to do so, in case no network changes + were identified and if the back end driver supports reusing ip addresses.