Merge "Allow additional exceptions in wrap_db_retry"
commit eeacae192e
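
This change lets wrap_db_retry retry on exceptions that oslo.db itself does not know about. The decorator gains an exception_checker argument (a callable, defaulting to lambda exc: False), the retry decision moves into a new _is_exception_expected() helper, and re-raising now goes through excutils.save_and_reraise_exception() instead of six.reraise()/sys.exc_info(). A minimal usage sketch, assuming an oslo.db release that contains this change; the exception class and decorated function below are hypothetical, not part of the commit:

    from oslo_db import api as oslo_db_api


    class TransientBackendError(Exception):
        """Hypothetical error raised by a backend while it is warming up."""


    @oslo_db_api.wrap_db_retry(
        max_retries=5, retry_interval=1, inc_retry_interval=True,
        max_retry_interval=10,
        exception_checker=lambda exc: isinstance(exc, TransientBackendError))
    def get_instance(instance_id):
        # Hypothetical lookup: TransientBackendError is retried with
        # increasing sleeps; once max_retries is exhausted (or for any
        # other exception type) the error is re-raised to the caller.
        raise TransientBackendError(instance_id)
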
@@ -24,10 +24,10 @@ API methods.
 """
 
 import logging
-import sys
 import threading
 import time
 
+from oslo_utils import excutils
 from oslo_utils import importutils
 import six
 
@@ -100,14 +100,20 @@ class wrap_db_retry(object):
 
     :param max_retry_interval: max interval value between retries
     :type max_retry_interval: int
+
+    :param exception_checker: checks if an exception should trigger a retry
+    :type exception_checker: callable
     """
 
     def __init__(self, retry_interval=0, max_retries=0, inc_retry_interval=0,
                  max_retry_interval=0, retry_on_disconnect=False,
-                 retry_on_deadlock=False, retry_on_request=False):
+                 retry_on_deadlock=False, retry_on_request=False,
+                 exception_checker=lambda exc: False):
         super(wrap_db_retry, self).__init__()
 
         self.db_error = ()
+        # default is that we re-raise anything unexpected
+        self.exception_checker = exception_checker
         if retry_on_disconnect:
             self.db_error += (exception.DBConnectionError, )
         if retry_on_deadlock:
@@ -124,26 +130,20 @@ class wrap_db_retry(object):
         def wrapper(*args, **kwargs):
             next_interval = self.retry_interval
             remaining = self.max_retries
-            db_error = self.db_error
 
             while True:
                 try:
                     return f(*args, **kwargs)
-                except db_error as e:
-                    if remaining == 0:
-                        LOG.exception(_LE('DB exceeded retry limit.'))
-                        if isinstance(e, exception.RetryRequest):
-                            six.reraise(type(e.inner_exc),
-                                        e.inner_exc,
-                                        sys.exc_info()[2])
-                        raise e
-                    if remaining != -1:
-                        remaining -= 1
-                        # RetryRequest is an application-initiated exception
-                        # and not an error condition in case retries are
-                        # not exceeded
-                        if not isinstance(e, exception.RetryRequest):
-                            LOG.exception(_LE('DB error.'))
+                except Exception as e:
+                    with excutils.save_and_reraise_exception() as ectxt:
+                        if remaining > 0:
+                            ectxt.reraise = not self._is_exception_expected(e)
+                        else:
+                            LOG.exception(_LE('DB exceeded retry limit.'))
+                            # if it's a RetryRequest, we need to unpack it
+                            if isinstance(e, exception.RetryRequest):
+                                ectxt.type_ = type(e.inner_exc)
+                                ectxt.value = e.inner_exc
                     # NOTE(vsergeyev): We are using patched time module, so
                     #                  this effectively yields the execution
                     #                  context to another green thread.
@@ -153,8 +153,20 @@ class wrap_db_retry(object):
                             next_interval * 2,
                             self.max_retry_interval
                         )
+                    remaining -= 1
 
         return wrapper
 
+    def _is_exception_expected(self, exc):
+        if isinstance(exc, self.db_error):
+            # RetryRequest is an application-initiated exception
+            # and not an error condition in case retries are
+            # not exceeded
+            if not isinstance(exc, exception.RetryRequest):
+                LOG.exception(_LE('DB error.'))
+            return True
+        return self.exception_checker(exc)
+
 
 class DBAPI(object):
     """Initialize the chosen DB API backend.
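
The new helper keeps the old precedence: exception types collected in self.db_error (from the retry_on_* flags) are always retried while retries remain, and only for anything else is the caller's exception_checker consulted. The hunk below adds a test for that checker path to DBRetryRequestCase. As an illustration of the kind of checker a consumer could plug in (a hypothetical example, not part of this commit):

    def retry_on_transient_message(exc):
        # Hypothetical checker: retry when the error text suggests a
        # transient backend hiccup; everything else is re-raised.
        transient_markers = ('temporarily unavailable', 'try again later')
        message = str(exc).lower()
        return any(marker in message for marker in transient_markers)
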
@@ -197,6 +197,22 @@ class DBRetryRequestCase(DBAPITestCase):
         self.assertRaises(ValueError, some_method, res)
         self.assertEqual(max_retries + 1, res['result'])
 
+    def test_retry_wrapper_exception_checker(self):
+
+        def exception_checker(exc):
+            return isinstance(exc, ValueError) and exc.args[0] < 5
+
+        @api.wrap_db_retry(max_retries=10, retry_on_request=True,
+                           exception_checker=exception_checker)
+        def some_method(res):
+            res['result'] += 1
+            raise ValueError(res['result'])
+
+        res = {'result': 0}
+        self.assertRaises(ValueError, some_method, res)
+        # our exception checker should have stopped returning True after 5
+        self.assertEqual(5, res['result'])
+
     @mock.patch.object(DBAPI, 'api_class_call1')
     @mock.patch.object(api, 'wrap_db_retry')
     def test_mocked_methods_are_not_wrapped(self, mocked_wrap, mocked_method):