Stop logging deadlock tracebacks
The oslo db retry decorator logs a traceback every time a deadlock is encountered, even though the operation is being retried. With multiple workers and a Galera cluster, deadlocks are common occurrences due to our use of with_lockmode update, so we should not be polluting the logs. This patch adjusts our usage of the retry decorator to catch deadlocks with the exception checker, which does not log them until the retries are exhausted. Change-Id: I433fbbad61070e20ebe934b9247e36fc190fa3e0
This commit is contained in:
parent
6a845249d0
commit
a93886278f
|
@ -17,6 +17,7 @@ import contextlib
|
|||
|
||||
from oslo_config import cfg
|
||||
from oslo_db import api as oslo_db_api
|
||||
from oslo_db import exception as db_exc
|
||||
from oslo_db.sqlalchemy import session
|
||||
from oslo_utils import uuidutils
|
||||
from sqlalchemy import exc
|
||||
|
@ -28,8 +29,11 @@ from neutron.db import common_db_mixin
|
|||
_FACADE = None
|
||||
|
||||
MAX_RETRIES = 10
|
||||
retry_db_errors = oslo_db_api.wrap_db_retry(max_retries=MAX_RETRIES,
|
||||
retry_on_deadlock=True)
|
||||
is_deadlock = lambda e: isinstance(e, db_exc.DBDeadlock)
|
||||
retry_db_errors = oslo_db_api.wrap_db_retry(
|
||||
max_retries=MAX_RETRIES,
|
||||
exception_checker=is_deadlock
|
||||
)
|
||||
|
||||
|
||||
def _create_facade_lazily():
|
||||
|
|
|
@ -137,7 +137,7 @@ class DbQuotaDriver(object):
|
|||
retry_interval=0.1,
|
||||
inc_retry_interval=True,
|
||||
retry_on_request=True,
|
||||
retry_on_deadlock=True)
|
||||
exception_checker=db_api.is_deadlock)
|
||||
def make_reservation(self, context, tenant_id, resources, deltas, plugin):
|
||||
# Lock current reservation table
|
||||
# NOTE(salv-orlando): This routine uses DB write locks.
|
||||
|
|
|
@ -124,7 +124,8 @@ class TunnelTypeDriver(helpers.SegmentTypeDriver):
|
|||
{'type': self.get_type(), 'range': current_range})
|
||||
|
||||
@oslo_db_api.wrap_db_retry(
|
||||
max_retries=db_api.MAX_RETRIES, retry_on_deadlock=True)
|
||||
max_retries=db_api.MAX_RETRIES,
|
||||
exception_checker=db_api.is_deadlock)
|
||||
def sync_allocations(self):
|
||||
# determine current configured allocatable tunnel ids
|
||||
tunnel_ids = set()
|
||||
|
|
|
@ -1443,9 +1443,9 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||
return self._bind_port_if_needed(port_context)
|
||||
|
||||
@oslo_db_api.wrap_db_retry(
|
||||
max_retries=db_api.MAX_RETRIES,
|
||||
retry_on_deadlock=True, retry_on_request=True,
|
||||
exception_checker=lambda e: isinstance(e, sa_exc.StaleDataError)
|
||||
max_retries=db_api.MAX_RETRIES, retry_on_request=True,
|
||||
exception_checker=lambda e: isinstance(e, (sa_exc.StaleDataError,
|
||||
os_db_exception.DBDeadlock))
|
||||
)
|
||||
def update_port_status(self, context, port_id, status, host=None,
|
||||
network=None):
|
||||
|
|
|
@ -211,9 +211,9 @@ class TrackedResource(BaseResource):
|
|||
# ensure that an UPDATE statement is emitted rather than an INSERT one
|
||||
@oslo_db_api.wrap_db_retry(
|
||||
max_retries=db_api.MAX_RETRIES,
|
||||
retry_on_deadlock=True,
|
||||
exception_checker=lambda exc:
|
||||
isinstance(exc, oslo_db_exception.DBDuplicateEntry))
|
||||
isinstance(exc, (oslo_db_exception.DBDuplicateEntry,
|
||||
oslo_db_exception.DBDeadlock)))
|
||||
def _set_quota_usage(self, context, tenant_id, in_use):
|
||||
return quota_api.set_quota_usage(
|
||||
context, self.name, tenant_id, in_use=in_use)
|
||||
|
|
Loading…
Reference in New Issue