Use long_rpc_timeout in select_destinations RPC call
Conductor RPC calls the scheduler to get hosts during
server create, which in a multi-create request with a
lot of servers and the default rpc_response_timeout, can
trigger a MessagingTimeout. Due to the old
retry_select_destinations decorator, conductor will retry
the select_destinations RPC call up to max_attempts times,
so thrice by default. This can clobber the scheduler and
placement while the initial scheduler worker is still
trying to process the beefy request and allocate resources
in placement.
This has been recreated in a devstack test patch [1] and
shown to fail with 1000 instances in a single request with
the default rpc_response_timeout of 60 seconds. Changing the
rpc_response_timeout to 300 avoids the MessagingTimeout and
retry loop.
Since Rocky we have the long_rpc_timeout config option which
defaults to 1800 seconds. The RPC client can thus be changed
to heartbeat the scheduler service during the RPC call every
$rpc_response_timeout seconds with a hard timeout of
$long_rpc_timeout. That change is made here.
As a result, the problematic retry_select_destinations
decorator is also no longer necessary and removed here. That
decorator was added in I2b891bf6d0a3d8f45fd98ca54a665ae78eab78b3
and was a hack for scheduler high availability where a
MessagingTimeout was assumed to be a result of the scheduler
service dying so retrying the request was reasonable to hit
another scheduler worker, but is clearly not sufficient
in the large multi-create case, and long_rpc_timeout is a
better fit for that HA type scenario to heartbeat the scheduler
service.
[1] https://review.openstack.org/507918/
Conflicts:
nova/scheduler/client/__init__.py
NOTE(mriedem): The conflict is due to not having change
I1f97d00fb7633f173370ed6787c9a71ecd8106d5 in Rocky.
Change-Id: I87d89967bbc5fbf59cf44d9a63eb6e9d477ac1f3
Closes-Bug: #1795992
(cherry picked from commit 5af632e9ca)
This commit is contained in:
parent 5d9dd1f63f
commit 4b5a21b4eb
@ -27,6 +27,7 @@ instead of the global rpc_response_timeout value.
|
|||||||
Operations with RPC calls that utilize this value:
|
Operations with RPC calls that utilize this value:
|
||||||
|
|
||||||
* live migration
|
* live migration
|
||||||
|
* scheduling
|
||||||
|
|
||||||
Related options:
|
Related options:
|
||||||
|
|
||||||
|
@ -17,8 +17,6 @@ import functools
|
|||||||
|
|
||||||
from oslo_utils import importutils
|
from oslo_utils import importutils
|
||||||
|
|
||||||
from nova.scheduler import utils
|
|
||||||
|
|
||||||
|
|
||||||
class LazyLoader(object):
|
class LazyLoader(object):
|
||||||
|
|
||||||
@ -46,7 +44,6 @@ class SchedulerClient(object):
|
|||||||
self.reportclient = LazyLoader(importutils.import_class(
|
self.reportclient = LazyLoader(importutils.import_class(
|
||||||
'nova.scheduler.client.report.SchedulerReportClient'))
|
'nova.scheduler.client.report.SchedulerReportClient'))
|
||||||
|
|
||||||
@utils.retry_select_destinations
|
|
||||||
def select_destinations(self, context, spec_obj, instance_uuids,
|
def select_destinations(self, context, spec_obj, instance_uuids,
|
||||||
return_objects=False, return_alternates=False):
|
return_objects=False, return_alternates=False):
|
||||||
return self.queryclient.select_destinations(context, spec_obj,
|
return self.queryclient.select_destinations(context, spec_obj,
|
||||||
|
@ -154,7 +154,9 @@ class SchedulerAPI(object):
|
|||||||
msg_args['filter_properties'
|
msg_args['filter_properties'
|
||||||
] = spec_obj.to_legacy_filter_properties_dict()
|
] = spec_obj.to_legacy_filter_properties_dict()
|
||||||
version = '4.0'
|
version = '4.0'
|
||||||
cctxt = self.client.prepare(version=version)
|
cctxt = self.client.prepare(
|
||||||
|
version=version, call_monitor_timeout=CONF.rpc_response_timeout,
|
||||||
|
timeout=CONF.long_rpc_timeout)
|
||||||
return cctxt.call(ctxt, 'select_destinations', **msg_args)
|
return cctxt.call(ctxt, 'select_destinations', **msg_args)
|
||||||
|
|
||||||
def update_aggregates(self, ctxt, aggregates):
|
def update_aggregates(self, ctxt, aggregates):
|
||||||
|
@ -15,12 +15,10 @@
|
|||||||
"""Utility methods for scheduling."""
|
"""Utility methods for scheduling."""
|
||||||
|
|
||||||
import collections
|
import collections
|
||||||
import functools
|
|
||||||
import re
|
import re
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
import oslo_messaging as messaging
|
|
||||||
from oslo_serialization import jsonutils
|
from oslo_serialization import jsonutils
|
||||||
from six.moves.urllib import parse
|
from six.moves.urllib import parse
|
||||||
|
|
||||||
@ -890,37 +888,6 @@ def setup_instance_group(context, request_spec):
|
|||||||
request_spec.instance_group.members = group_info.members
|
request_spec.instance_group.members = group_info.members
|
||||||
|
|
||||||
|
|
||||||
def retry_on_timeout(retries=1):
|
|
||||||
"""Retry the call in case a MessagingTimeout is raised.
|
|
||||||
|
|
||||||
A decorator for retrying calls when a service dies mid-request.
|
|
||||||
|
|
||||||
:param retries: Number of retries
|
|
||||||
:returns: Decorator
|
|
||||||
"""
|
|
||||||
def outer(func):
|
|
||||||
@functools.wraps(func)
|
|
||||||
def wrapped(*args, **kwargs):
|
|
||||||
attempt = 0
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
return func(*args, **kwargs)
|
|
||||||
except messaging.MessagingTimeout:
|
|
||||||
attempt += 1
|
|
||||||
if attempt <= retries:
|
|
||||||
LOG.warning(
|
|
||||||
"Retrying %(name)s after a MessagingTimeout, "
|
|
||||||
"attempt %(attempt)s of %(retries)s.",
|
|
||||||
{'attempt': attempt, 'retries': retries,
|
|
||||||
'name': func.__name__})
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
return wrapped
|
|
||||||
return outer
|
|
||||||
|
|
||||||
retry_select_destinations = retry_on_timeout(CONF.scheduler.max_attempts - 1)
|
|
||||||
|
|
||||||
|
|
||||||
def request_is_rebuild(spec_obj):
|
def request_is_rebuild(spec_obj):
|
||||||
"""Returns True if request is for a rebuild.
|
"""Returns True if request is for a rebuild.
|
||||||
|
|
||||||
|
@ -60,19 +60,7 @@ class SchedulerClientTestCase(test.NoDBTestCase):
|
|||||||
False]
|
False]
|
||||||
self.assertRaises(messaging.MessagingTimeout,
|
self.assertRaises(messaging.MessagingTimeout,
|
||||||
self.client.select_destinations, *fake_args)
|
self.client.select_destinations, *fake_args)
|
||||||
mock_select_destinations.assert_has_calls([mock.call(*fake_args)] * 2)
|
mock_select_destinations.assert_called_once_with(*fake_args)
|
||||||
|
|
||||||
@mock.patch.object(scheduler_query_client.SchedulerQueryClient,
|
|
||||||
'select_destinations', side_effect=[
|
|
||||||
messaging.MessagingTimeout(), mock.DEFAULT])
|
|
||||||
def test_select_destinations_timeout_once(self, mock_select_destinations):
|
|
||||||
# scenario: the scheduler service times out & recovers after failure
|
|
||||||
fake_spec = objects.RequestSpec()
|
|
||||||
fake_spec.instance_uuid = uuids.instance
|
|
||||||
fake_args = ['ctxt', fake_spec, [fake_spec.instance_uuid], False,
|
|
||||||
False]
|
|
||||||
self.client.select_destinations(*fake_args)
|
|
||||||
mock_select_destinations.assert_has_calls([mock.call(*fake_args)] * 2)
|
|
||||||
|
|
||||||
@mock.patch.object(scheduler_query_client.SchedulerQueryClient,
|
@mock.patch.object(scheduler_query_client.SchedulerQueryClient,
|
||||||
'update_aggregates')
|
'update_aggregates')
|
||||||
|
@ -18,6 +18,7 @@ Unit Tests for nova.scheduler.rpcapi
|
|||||||
|
|
||||||
import mock
|
import mock
|
||||||
|
|
||||||
|
from nova import conf
|
||||||
from nova import context
|
from nova import context
|
||||||
from nova import exception as exc
|
from nova import exception as exc
|
||||||
from nova import objects
|
from nova import objects
|
||||||
@ -25,6 +26,8 @@ from nova.scheduler import rpcapi as scheduler_rpcapi
|
|||||||
from nova import test
|
from nova import test
|
||||||
from nova.tests import uuidsentinel as uuids
|
from nova.tests import uuidsentinel as uuids
|
||||||
|
|
||||||
|
CONF = conf.CONF
|
||||||
|
|
||||||
|
|
||||||
class SchedulerRpcAPITestCase(test.NoDBTestCase):
|
class SchedulerRpcAPITestCase(test.NoDBTestCase):
|
||||||
def _test_scheduler_api(self, method, rpc_method, expected_args=None,
|
def _test_scheduler_api(self, method, rpc_method, expected_args=None,
|
||||||
@ -45,6 +48,11 @@ class SchedulerRpcAPITestCase(test.NoDBTestCase):
|
|||||||
expected_kwargs = expected_args
|
expected_kwargs = expected_args
|
||||||
|
|
||||||
prepare_kwargs = {}
|
prepare_kwargs = {}
|
||||||
|
if method == 'select_destinations':
|
||||||
|
prepare_kwargs.update({
|
||||||
|
'call_monitor_timeout': CONF.rpc_response_timeout,
|
||||||
|
'timeout': CONF.long_rpc_timeout
|
||||||
|
})
|
||||||
if expected_fanout:
|
if expected_fanout:
|
||||||
prepare_kwargs['fanout'] = True
|
prepare_kwargs['fanout'] = True
|
||||||
if expected_version:
|
if expected_version:
|
||||||
|
@ -0,0 +1,7 @@
|
|||||||
|
---
|
||||||
|
fixes:
|
||||||
|
- |
|
||||||
|
The ``long_rpc_timeout`` configuration option is now used for the RPC
|
||||||
|
call to the scheduler to select a host. This is in order to avoid a
|
||||||
|
timeout when scheduling multiple servers in a single request and/or when
|
||||||
|
the scheduler needs to process a large number of hosts.
|
Loading…
Reference in New Issue
Block a user