Handle the node being locked
If the node is locked, a lookup cannot be performed when an agent token needs to be generated, and it tends to fail with an error like this:

ironic_python_agent.ironic_api_client [-] Failed looking up node with addresses '00:6f:bb:34:b3:4d,00:6f:bb:34:b3:4b' at https://172.22.0.2:6385. Error 409: Node c25e451b-d2fb-4168-b690-f15bc8365520 is locked by host 172.22.0.2, please retry after the current operation is completed.. Check if inspection has completed.

The problem is that if we keep pounding on the door we can actually make the situation worse; previously we would just let tenacity retry. We now hold for up to 30 seconds before proceeding, so the current operation has hopefully had time to complete.

Also fixes the error logging to help humans' sanity.

Change-Id: I97d3e27e2adb731794a7746737d3788c6e7977a0
parent b68a4c8a92
commit 5ed520df89
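The commit message mentions "letting tenacity retry": the lookup is wrapped in a tenacity retry loop that keeps calling it for as long as it returns False. Below is a minimal sketch of comparable wiring; the build_lookup_retry helper and the incrementing wait strategy are assumptions for illustration, not the project's actual code.

import tenacity


def build_lookup_retry(do_lookup, timeout, starting_interval, max_interval=60):
    """Wrap a lookup callable so it retries while it returns False.

    Illustrative only: the helper name and wait strategy are assumptions,
    not the agent's actual implementation.
    """
    retry = tenacity.retry(
        # Keep retrying as long as the lookup reports "not found yet".
        retry=tenacity.retry_if_result(lambda r: r is False),
        # Give up entirely once the overall timeout has elapsed.
        stop=tenacity.stop_after_delay(timeout),
        # Back off between attempts, capped at max_interval seconds.
        wait=tenacity.wait_incrementing(start=starting_interval,
                                        increment=starting_interval,
                                        max=max_interval),
    )
    return retry(do_lookup)

The hunk below that touches lookup_node raises max_interval from 30 to 60 seconds, so this outer back-off can stretch further while the node stays locked; the new 409 handling adds its own pause on top of it.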
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import json
+import time
 
 from oslo_config import cfg
 from oslo_log import log
@@ -45,6 +46,7 @@ class APIClient(object):
     heartbeat_api = '/%s/heartbeat/{uuid}' % api_version
     _ironic_api_version = None
     agent_token = None
+    lookup_lock_pause = 0
 
     def __init__(self, api_url):
         self.api_url = api_url.rstrip('/')
@@ -177,7 +179,7 @@ class APIClient(object):
             raise errors.HeartbeatError(error)
 
     def lookup_node(self, hardware_info, timeout, starting_interval,
-                    node_uuid=None, max_interval=30):
+                    node_uuid=None, max_interval=60):
         retry = tenacity.retry(
            retry=tenacity.retry_if_result(lambda r: r is False),
            stop=tenacity.stop_after_delay(timeout),
@@ -245,10 +247,31 @@ class APIClient(object):
             LOG.error(exc_msg)
             raise errors.LookupNodeError(msg)
 
+        if response.status_code == requests.codes.CONFLICT:
+            if self.lookup_lock_pause == 0:
+                self.lookup_lock_pause = 5
+            elif self.lookup_lock_pause == 5:
+                self.lookup_lock_pause = 10
+            elif self.lookup_lock_pause == 10:
+                # If we're reaching this point, we've got a long held
+                # persistent lock, which means things can go very sideways
+                # or the ironic deployment is downright grumpy. Either way,
+                # we need to slow things down.
+                self.lookup_lock_pause = 30
+            LOG.warning(
+                'Ironic has responded with a conflict, signaling the '
+                'node is locked. We will wait %(time)s seconds before trying '
+                'again. %(err)s',
+                {'time': self.lookup_lock_pause,
+                 'err': self._error_from_response(response)}
+            )
+            time.sleep(self.lookup_lock_pause)
+            return False
+
         if response.status_code != requests.codes.OK:
             LOG.warning(
                 'Failed looking up node with addresses %r at %s. '
-                '%s. Check if inspection has completed.',
+                'Check if inspection has completed? %s',
                 params['addresses'], self.api_url,
                 self._error_from_response(response)
             )
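To make the new conflict branch easier to follow, here is a stripped-down model of the same escalation with the sleep left in place; the class and method names are illustrative only, not the agent's actual code.

import time


class LockBackoff(object):
    """Illustrative model of the lookup_lock_pause escalation above."""

    def __init__(self):
        self.lookup_lock_pause = 0

    def pause_for_locked_node(self):
        # Each consecutive conflict bumps the pause: 0 -> 5 -> 10 -> 30.
        if self.lookup_lock_pause == 0:
            self.lookup_lock_pause = 5
        elif self.lookup_lock_pause == 5:
            self.lookup_lock_pause = 10
        else:
            # A long-held lock; stay at the 30 second ceiling.
            self.lookup_lock_pause = 30
        time.sleep(self.lookup_lock_pause)
        return self.lookup_lock_pause

Three consecutive conflicts therefore pause for 5, 10 and then 30 seconds, and every further conflict keeps waiting 30 seconds; nothing in this change resets the pause once a lookup finally succeeds.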
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import json
+import time
 from unittest import mock
 
 from oslo_config import cfg
@@ -461,6 +462,32 @@ class TestBaseIronicPythonAgent(base.IronicAgentTest):
         mock_log.error.assert_has_calls([])
         self.assertEqual(1, mock_log.warning.call_count)
 
+    @mock.patch.object(time, 'sleep', autospec=True)
+    @mock.patch.object(ironic_api_client, 'LOG', autospec=True)
+    def test_do_lookup_node_locked(self, mock_log, mock_sleep):
+        response = FakeResponse(status_code=409, content={})
+        self.api_client.session.request = mock.Mock()
+        self.api_client.session.request.return_value = response
+        self.assertEqual(0, self.api_client.lookup_lock_pause)
+        error = self.api_client._do_lookup(self.hardware_info,
+                                           node_uuid=None)
+        self.assertFalse(error)
+        mock_log.error.assert_has_calls([])
+        self.assertEqual(1, mock_log.warning.call_count)
+        self.assertEqual(1, mock_sleep.call_count)
+        self.assertEqual(5, self.api_client.lookup_lock_pause)
+        error = self.api_client._do_lookup(self.hardware_info,
+                                           node_uuid=None)
+        self.assertEqual(10, self.api_client.lookup_lock_pause)
+        error = self.api_client._do_lookup(self.hardware_info,
+                                           node_uuid=None)
+        self.assertFalse(error)
+        self.assertEqual(30, self.api_client.lookup_lock_pause)
+        error = self.api_client._do_lookup(self.hardware_info,
+                                           node_uuid=None)
+        self.assertFalse(error)
+        self.assertEqual(30, self.api_client.lookup_lock_pause)
+
     @mock.patch.object(ironic_api_client, 'LOG', autospec=True)
     def test_do_lookup_unknown_exception(self, mock_log):
         self.api_client.session.request = mock.Mock()
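One detail of the new test that is easy to misread: stacked mock.patch decorators are applied bottom-up, so the innermost patch is injected first, which is why mock_log (the LOG patch) precedes mock_sleep in the signature. A tiny self-contained illustration of that ordering, using unrelated names chosen only for the example:

import time
from unittest import mock


@mock.patch.object(time, 'sleep', autospec=True)       # outermost, injected last
@mock.patch.object(time, 'monotonic', autospec=True)   # innermost, injected first
def run(fake_monotonic, fake_sleep):
    fake_monotonic.return_value = 0.0
    time.sleep(5)            # intercepted by fake_sleep; returns immediately
    return time.monotonic(), fake_sleep.call_count


# run() returns (0.0, 1): the patched monotonic value and one recorded sleep.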
@@ -0,0 +1,13 @@
+---
+fixes:
+  - |
+    Fixes, or at least lessens, the case where a running Ironic agent can
+    stack up numerous lookup requests against an Ironic deployment when a
+    node is locked. In particular, this is because the lookup also drives
+    generation of the agent token, which requires the conductor to allocate
+    a worker, generate the token, and return the result to the API client.
+    The agent's retry logic will now wait up to ``60`` seconds between
+    lookup attempts, and if an HTTP Conflict (409) is received, the agent
+    will pause lookup operations for up to thirty seconds instead of
+    continuing to attempt lookups, which could needlessly create more work
+    for the Ironic deployment.