From ecda09a14d0b31169b275ed67a9088a131fbef12 Mon Sep 17 00:00:00 2001
From: melakualehegn
Date: Sat, 28 Oct 2023 02:09:22 +0300
Subject: [PATCH] Change misleading user message when share services are down

Enhance the user message when all of the share manager services are
down or are still initializing.

Removed a duplicate test: test_create_share_non_admin

Closes-Bug: #1886690
Change-Id: I168564a5b054d17762ad668ebbe4f5e7b562197b
---
 manila/message/message_field.py               |  7 +++
 manila/scheduler/drivers/filter.py            | 14 +++++
 manila/tests/scheduler/drivers/test_filter.py | 60 +++++++++----------
 ...ervices-down-message-c857de1a678b6781.yaml |  6 ++
 4 files changed, 54 insertions(+), 33 deletions(-)
 create mode 100644 releasenotes/notes/bug-1886690-edit-services-down-message-c857de1a678b6781.yaml

diff --git a/manila/message/message_field.py b/manila/message/message_field.py
index e238df18c4..5faf7c168b 100644
--- a/manila/message/message_field.py
+++ b/manila/message/message_field.py
@@ -148,6 +148,12 @@ class Detail(object):
           "increase the network port quotas or free up some ports and retry. "
           "If this doesn't work, contact your administrator to troubleshoot "
           "issues with your network."))
+    SHARE_BACKEND_NOT_READY_YET = (
+        '028',
+        _("No storage could be allocated for this share "
+          "request. Share back end services are not "
+          "ready yet. Contact your administrator in case "
+          "retrying does not help."))
 
     ALL = (
         UNKNOWN_ERROR,
@@ -177,6 +183,7 @@ class Detail(object):
         MISSING_SECURITY_SERVICE,
         DRIVER_FAILED_TRANSFER_ACCEPT,
         SHARE_NETWORK_PORT_QUOTA_LIMIT_EXCEEDED,
+        SHARE_BACKEND_NOT_READY_YET,
     )
 
     # Exception and detail mappings
diff --git a/manila/scheduler/drivers/filter.py b/manila/scheduler/drivers/filter.py
index f27e82ab81..e12f49a160 100644
--- a/manila/scheduler/drivers/filter.py
+++ b/manila/scheduler/drivers/filter.py
@@ -241,6 +241,20 @@ class FilterScheduler(base.Scheduler):
         # Note: remember, we are using an iterator here. So only
         # traverse this list once.
         hosts = self.host_manager.get_all_host_states_share(elevated)
+        if not hosts:
+            msg = _("No storage could be allocated for this share "
+                    "request. Share back end services are not "
+                    "ready yet. Contact your administrator in case "
+                    "retrying does not help.")
+            LOG.error(msg)
+            self.message_api.create(
+                context,
+                message_field.Action.CREATE,
+                context.project_id,
+                resource_type=message_field.Resource.SHARE,
+                resource_id=request_spec.get('share_id', None),
+                detail=message_field.Detail.SHARE_BACKEND_NOT_READY_YET)
+            raise exception.WillNotSchedule(msg)
 
         # Filter local hosts based on requirements ...
         hosts, last_filter = self.host_manager.get_filtered_hosts(
diff --git a/manila/tests/scheduler/drivers/test_filter.py b/manila/tests/scheduler/drivers/test_filter.py
index 913cbc1e2b..d5e89c55e7 100644
--- a/manila/tests/scheduler/drivers/test_filter.py
+++ b/manila/tests/scheduler/drivers/test_filter.py
@@ -145,43 +145,28 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
         self.assertDictEqual(fake_type, retval['resource_type'])
 
     def test_create_share_no_hosts(self):
-        # Ensure empty hosts/child_zones result in NoValidHosts exception.
+        # Ensure empty hosts/child_zones result in WillNotSchedule exception.
         sched = fakes.FakeFilterScheduler()
         fake_context = context.RequestContext('user', 'project')
+        create_mock_message = self.mock_object(sched.message_api, 'create')
         request_spec = {
             'share_properties': {'project_id': 1, 'size': 1},
             'share_instance_properties': {},
             'share_type': {'name': 'NFS'},
             'share_id': 'fake-id1',
         }
-        self.assertRaises(exception.NoValidHost, sched.schedule_create_share,
-                          fake_context, request_spec, {})
-
-    @mock.patch('manila.scheduler.host_manager.HostManager.'
-                'get_all_host_states_share')
-    def test_create_share_non_admin(self, _mock_get_all_host_states):
-        # Test creating a volume locally using create_volume, passing
-        # a non-admin context. DB actions should work.
-        self.was_admin = False
-
-        def fake_get(context, *args, **kwargs):
-            # Make sure this is called with admin context, even though
-            # we're using user context below.
-            self.was_admin = context.is_admin
-            return {}
-
-        sched = fakes.FakeFilterScheduler()
-        _mock_get_all_host_states.side_effect = fake_get
-        fake_context = context.RequestContext('user', 'project')
-        request_spec = {
-            'share_properties': {'project_id': 1, 'size': 1},
-            'share_instance_properties': {},
-            'share_type': {'name': 'NFS'},
-            'share_id': 'fake-id1',
-        }
-        self.assertRaises(exception.NoValidHost, sched.schedule_create_share,
-                          fake_context, request_spec, {})
-        self.assertTrue(self.was_admin)
+        self.assertRaises(exception.WillNotSchedule,
+                          sched.schedule_create_share,
+                          fake_context,
+                          request_spec,
+                          {})
+        create_mock_message.assert_called_once_with(
+            fake_context,
+            message_field.Action.CREATE,
+            fake_context.project_id,
+            resource_type=message_field.Resource.SHARE,
+            resource_id=request_spec.get('share_id', None),
+            detail=message_field.Detail.SHARE_BACKEND_NOT_READY_YET)
 
     @ddt.data(
         {'name': 'foo'},
@@ -420,10 +405,13 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
         self.assertRaises(exception.InvalidParameterValue,
                           fakes.FakeFilterScheduler)
 
-    def test_retry_disabled(self):
+    @mock.patch('manila.scheduler.host_manager.HostManager.'
+                'get_all_host_states_share')
+    def test_retry_disabled(self, _mock_get_all_host_states):
         # Retry info should not get populated when re-scheduling is off.
         self.flags(scheduler_max_attempts=1)
         sched = fakes.FakeFilterScheduler()
+        sched.host_manager = fakes.FakeHostManager()
         request_spec = {
             'share_type': {'name': 'iSCSI'},
             'share_properties': {'project_id': 1, 'size': 1},
@@ -436,10 +424,13 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
         # Should not have retry info in the populated filter properties.
         self.assertNotIn("retry", filter_properties)
 
-    def test_retry_attempt_one(self):
+    @mock.patch('manila.scheduler.host_manager.HostManager.'
+                'get_all_host_states_share')
+    def test_retry_attempt_one(self, _mock_get_all_host_states):
         # Test retry logic on initial scheduling attempt.
         self.flags(scheduler_max_attempts=2)
         sched = fakes.FakeFilterScheduler()
+        sched.host_manager = fakes.FakeHostManager()
         request_spec = {
             'share_type': {'name': 'iSCSI'},
             'share_properties': {'project_id': 1, 'size': 1},
@@ -452,10 +443,13 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
         num_attempts = filter_properties['retry']['num_attempts']
         self.assertEqual(1, num_attempts)
 
-    def test_retry_attempt_two(self):
+    @mock.patch('manila.scheduler.host_manager.HostManager.'
+                'get_all_host_states_share')
+    def test_retry_attempt_two(self, _mock_get_all_host_states):
         # Test retry logic when re-scheduling.
         self.flags(scheduler_max_attempts=2)
         sched = fakes.FakeFilterScheduler()
+        sched.host_manager = fakes.FakeHostManager()
         request_spec = {
             'share_type': {'name': 'iSCSI'},
             'share_properties': {'project_id': 1, 'size': 1},
@@ -643,7 +637,7 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
         self.mock_object(sched.host_manager, 'get_filtered_hosts',
                          mock.Mock(return_value=(None, 'filter')))
 
-        self.assertRaises(exception.NoValidHost,
+        self.assertRaises(exception.WillNotSchedule,
                           sched.schedule_create_replica,
                           self.context, request_spec, {})
 
diff --git a/releasenotes/notes/bug-1886690-edit-services-down-message-c857de1a678b6781.yaml b/releasenotes/notes/bug-1886690-edit-services-down-message-c857de1a678b6781.yaml
new file mode 100644
index 0000000000..b78aaaca38
--- /dev/null
+++ b/releasenotes/notes/bug-1886690-edit-services-down-message-c857de1a678b6781.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    Fixed `bug #1886690 <https://launchpad.net/bugs/1886690>`_: the user
+    message shown when share services are down was misleading. The message
+    is now clear and descriptive.
\ No newline at end of file
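
The guard added to FilterScheduler._schedule_share() above boils down to: record a user-visible message, then refuse to schedule, whenever no back end host states are available. The following minimal sketch illustrates that pattern outside of manila. FakeMessageApi, the dict-based context, the schedule_share helper, and the literal 'CREATE'/'SHARE'/'028' values are simplified stand-ins invented for this sketch; the real code uses manila's message_api, RequestContext, and message_field constants exactly as shown in the diff.

# Minimal standalone sketch of the fail-fast guard introduced in
# FilterScheduler._schedule_share(). Everything here is a simplified
# stand-in for manila internals; it only mirrors the call pattern.


class WillNotSchedule(Exception):
    """Stand-in for manila.exception.WillNotSchedule."""


class FakeMessageApi(object):
    """Records user-visible messages the way message_api.create() is called."""

    def __init__(self):
        self.messages = []

    def create(self, context, action, project_id, resource_type=None,
               resource_id=None, detail=None):
        # The real API persists an asynchronous user message; here we
        # simply record the arguments so they can be inspected.
        self.messages.append({
            'action': action,
            'project_id': project_id,
            'resource_type': resource_type,
            'resource_id': resource_id,
            'detail': detail,
        })


def schedule_share(host_states, message_api, context, request_spec):
    """Refuse to schedule, with a clear user message, if no back ends exist."""
    if not host_states:
        msg = ("No storage could be allocated for this share request. "
               "Share back end services are not ready yet. Contact your "
               "administrator in case retrying does not help.")
        message_api.create(
            context,
            'CREATE',               # real code: message_field.Action.CREATE
            context['project_id'],
            resource_type='SHARE',  # real code: message_field.Resource.SHARE
            resource_id=request_spec.get('share_id'),
            detail='028')           # real code: Detail.SHARE_BACKEND_NOT_READY_YET
        raise WillNotSchedule(msg)
    # Normal filtering and weighing of host_states would continue here.
    return host_states[0]


if __name__ == '__main__':
    message_api = FakeMessageApi()
    context = {'project_id': 'demo-project'}
    request_spec = {'share_id': 'fake-id1'}
    try:
        schedule_share([], message_api, context, request_spec)
    except WillNotSchedule as exc:
        print('Scheduling refused:', exc)
        print('Recorded user message:', message_api.messages[0])

Run as a script, this prints the refusal reason and the recorded message, which is roughly the information an operator would later retrieve through manila's asynchronous user messages API.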