Use retry_db_errors from neutron-lib

The externally consumed APIs from neutron.db.api were rehomed into
neutron-lib with https://review.openstack.org/#/c/557040/

This patch consumes the retry_db_errors function from neutron-lib (a short
usage sketch follows the change metadata below) by:
- Removing retry_db_errors from neutron.db.api
- Updating the imports for retry_db_errors to use it from lib
- Using the DB API retry fixture from lib in the UTs where applicable
- Removing the UTs for neutron.db.api as they are now covered in lib

NeutronLibImpact

Change-Id: I1feb842d3e0e92c945efb01ece29856335a398fe
Author: Boden R
Date: 2018-05-22 11:33:25 -06:00
Parent: e446b1e35c
Commit: e4348eb1e1
14 changed files with 25 additions and 201 deletions
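
For reference, a minimal sketch of how consuming code picks up the rehomed
decorator after this change; the mixin and method names below are purely
illustrative and not taken from this change set:

    from neutron_lib.db import api as db_api


    class ExampleDbMixin(object):
        """Illustrative mixin, not part of this change."""

        @db_api.retry_db_errors
        def example_sync_state(self):
            # Retried on deadlocks, connection errors and other retriable
            # DB exceptions; mutable arguments are copied before each
            # attempt so retries stay side-effect free.
            pass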


@@ -21,6 +21,7 @@ from neutron_lib.api.definitions import portbindings
from neutron_lib.api import extensions
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib.db import api as db_api
from neutron_lib import exceptions
from neutron_lib.plugins import directory
from neutron_lib.plugins import utils as p_utils
@@ -34,7 +35,6 @@ from neutron._i18n import _
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db import provisioning_blocks
from neutron.extensions import segment as segment_ext
from neutron.quota import resource_registry


@@ -17,6 +17,7 @@ from neutron_lib.api.definitions import portbindings
from neutron_lib.api import extensions
from neutron_lib import constants
from neutron_lib import context as neutron_context
from neutron_lib.db import api as db_api
from neutron_lib import exceptions
from neutron_lib.exceptions import l3 as l3_exc
from neutron_lib.plugins import constants as plugin_constants
@@ -26,7 +27,6 @@ from oslo_log import log as logging
import oslo_messaging
from neutron.common import constants as n_const
from neutron.db import api as db_api
LOG = logging.getLogger(__name__)


@@ -20,6 +20,7 @@ from neutron_lib.api import attributes
from neutron_lib.api import faults
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.db import api as db_api
from neutron_lib import exceptions
from oslo_log import log as logging
from oslo_policy import policy as oslo_policy
@@ -32,7 +33,7 @@ from neutron.api.v2 import resource as wsgi_resource
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.db import api as db_api
from neutron.db import api as ndb_api
from neutron import policy
from neutron import quota
from neutron.quota import resource_registry
@@ -476,7 +477,7 @@ class Controller(object):
def notify(create_result):
# Ensure usage trackers for all resources affected by this API
# operation are marked as dirty
with db_api.context_manager.writer.using(request.context):
with ndb_api.context_manager.writer.using(request.context):
# Commit the reservation(s)
for reservation in reservations:
quota.QUOTAS.commit_reservation(


@@ -24,6 +24,7 @@ from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context
from neutron_lib.db import api as lib_db_api
from neutron_lib.db import utils as db_utils
from neutron_lib.exceptions import agent as agent_exc
from neutron_lib.exceptions import availability_zone as az_exc
@@ -242,7 +243,7 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
return [self._make_agent_dict(agent, fields=fields)
for agent in agents]
@db_api.retry_db_errors
@lib_db_api.retry_db_errors
def agent_health_check(self):
"""Scan agents and log if some are considered dead."""
agents = self.get_agents(context.get_admin_context(),


@@ -22,7 +22,6 @@ from neutron_lib.db import model_base
from neutron_lib import exceptions
from neutron_lib.objects import exceptions as obj_exc
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import excutils
@@ -69,14 +68,6 @@ def is_retriable(e):
return _is_nested_instance(e, db_exc.DBError) and '1305' in str(e)
_retry_db_errors = oslo_db_api.wrap_db_retry(
max_retries=MAX_RETRIES,
retry_interval=0.1,
inc_retry_interval=True,
exception_checker=is_retriable
)
def _tag_retriables_as_unretriable(f):
"""Puts a flag on retriable exceptions so is_retriable returns False.
@@ -99,36 +90,6 @@ def _copy_if_lds(item):
return copy.deepcopy(item) if isinstance(item, (list, dict, set)) else item
def retry_db_errors(f):
"""Nesting-safe retry decorator with auto-arg-copy and logging.
Retry decorator for all functions which do not accept a context as an
argument. If the function accepts a context, use
'retry_if_session_inactive' below.
If retriable errors are retried and exceed the count, they will be tagged
with a flag so is_retriable will no longer recognize them as retriable.
This prevents multiple applications of this decorator (and/or the one
below) from retrying the same exception.
"""
@_tag_retriables_as_unretriable
@_retry_db_errors
@six.wraps(f)
def wrapped(*args, **kwargs):
try:
# copy mutable args and kwargs to make retries safe. this doesn't
# prevent mutations of complex objects like the context or 'self'
dup_args = [_copy_if_lds(a) for a in args]
dup_kwargs = {k: _copy_if_lds(v) for k, v in kwargs.items()}
return f(*dup_args, **dup_kwargs)
except Exception as e:
with excutils.save_and_reraise_exception():
if is_retriable(e):
LOG.debug("Retry wrapper got retriable exception: %s", e)
return wrapped
def retry_if_session_inactive(context_var_name='context'):
"""Retries only if the session in the context is inactive.
@@ -149,7 +110,7 @@ def retry_if_session_inactive(context_var_name='context'):
except ValueError:
raise RuntimeError("Could not find position of var %s" %
context_var_name)
f_with_retry = retry_db_errors(f)
f_with_retry = api.retry_db_errors(f)
@six.wraps(f)
def wrapped(*args, **kwargs):

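The hunk above keeps retry_if_session_inactive in neutron.db.api and simply
delegates the wrapping to the neutron-lib decorator. A rough usage sketch,
with an illustrative plugin and method name, assuming the default 'context'
argument name:

    from neutron.db import api as db_api


    class ExamplePlugin(object):
        """Illustrative plugin, not part of this change."""

        @db_api.retry_if_session_inactive()
        def example_update_thing(self, context, thing_id):
            # Only retried when context.session is not already inside an
            # active transaction; otherwise errors propagate to an outer
            # retry decorator.
            pass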

@@ -23,6 +23,7 @@ from neutron_lib.callbacks import priority_group
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as const
from neutron_lib.db import api as lib_db_api
from neutron_lib import exceptions as n_exc
from neutron_lib.exceptions import agent as agent_exc
from neutron_lib.exceptions import l3 as l3_exc
@@ -436,7 +437,7 @@ class DVRResourceOperationHandler(object):
# with the csnat port.
# TODO(kevinbenton): switch to taskflow to manage
# these rollbacks.
@db_api.retry_db_errors
@lib_db_api.retry_db_errors
def revert():
# TODO(kevinbenton): even though we get the
# port each time, there is a potential race


@@ -19,6 +19,7 @@ import functools
from neutron_lib.api import attributes
from neutron_lib import constants
from neutron_lib.db import api as db_api
from oslo_log import log as logging
from oslo_utils import excutils
import pecan
@@ -26,7 +27,6 @@ from pecan import request
from neutron._i18n import _
from neutron.api import api_common
from neutron.db import api as db_api
from neutron import manager
from neutron_lib import exceptions


@@ -137,7 +137,7 @@ class _TunnelTypeDriverBase(helpers.SegmentTypeDriver):
LOG.info("%(type)s ID ranges: %(range)s",
{'type': self.get_type(), 'range': current_range})
@db_api.retry_db_errors
@lib_db_api.retry_db_errors
def sync_allocations(self):
# determine current configured allocatable tunnel ids
tunnel_ids = set()


@@ -17,6 +17,7 @@ import sys
from neutron_lib import constants as p_const
from neutron_lib import context
from neutron_lib.db import api as lib_db_api
from neutron_lib import exceptions as exc
from neutron_lib.plugins.ml2 import api
from neutron_lib.plugins import utils as plugin_utils
@@ -60,7 +61,7 @@ class VlanTypeDriver(helpers.SegmentTypeDriver):
sys.exit(1)
LOG.info("Network VLAN ranges: %s", self.network_vlan_ranges)
@db_api.retry_db_errors
@lib_db_api.retry_db_errors
def _sync_vlan_allocations(self):
ctx = context.get_admin_context()
with db_api.context_manager.writer.using(ctx):


@@ -377,7 +377,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
binding.persist_state_to_session(plugin_context.session)
return changes
@db_api.retry_db_errors
@lib_db_api.retry_db_errors
def _bind_port_if_needed(self, context, allow_notify=False,
need_notify=False):
if not context.network.network_segments:


@@ -21,6 +21,7 @@ import random
from neutron_lib.api.definitions import availability_zone as az_def
from neutron_lib import constants as lib_const
from neutron_lib.db import api as lib_db_api
from neutron_lib.exceptions import l3 as l3_exc
from oslo_config import cfg
from oslo_db import exception as db_exc
@@ -155,7 +156,7 @@ class L3Scheduler(object):
else:
self.bind_router(plugin, context, router['id'], l3_agent.id)
@db_api.retry_db_errors
@lib_db_api.retry_db_errors
def bind_router(self, plugin, context, router_id, agent_id,
is_manual_scheduling=False, is_ha=False):
"""Bind the router to the l3 agent which has been chosen.


@@ -1,146 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron_lib import exceptions
from oslo_db import exception as db_exc
import osprofiler
import sqlalchemy
from sqlalchemy.orm import exc
import testtools
from neutron.db import api as db_api
from neutron.tests import base
class TestDeadLockDecorator(base.BaseTestCase):
@db_api.retry_db_errors
def _decorated_function(self, fail_count, exc_to_raise):
self.fail_count = getattr(self, 'fail_count', fail_count + 1) - 1
if self.fail_count:
raise exc_to_raise
def test_regular_exception_excluded(self):
with testtools.ExpectedException(ValueError):
self._decorated_function(1, ValueError)
def test_staledata_error_caught(self):
e = exc.StaleDataError()
self.assertIsNone(self._decorated_function(1, e))
def test_dbconnection_error_caught(self):
e = db_exc.DBConnectionError()
self.assertIsNone(self._decorated_function(1, e))
def test_multi_exception_contains_retry(self):
e = exceptions.MultipleExceptions(
[ValueError(), db_exc.RetryRequest(TypeError())])
self.assertIsNone(self._decorated_function(1, e))
def test_multi_exception_contains_deadlock(self):
e = exceptions.MultipleExceptions([ValueError(), db_exc.DBDeadlock()])
self.assertIsNone(self._decorated_function(1, e))
def test_multi_nested_exception_contains_deadlock(self):
i = exceptions.MultipleExceptions([ValueError(), db_exc.DBDeadlock()])
e = exceptions.MultipleExceptions([ValueError(), i])
self.assertIsNone(self._decorated_function(1, e))
def test_multi_exception_raised_on_exceed(self):
# limit retries so this doesn't take 40 seconds
mock.patch.object(db_api._retry_db_errors, 'max_retries',
new=2).start()
e = exceptions.MultipleExceptions([ValueError(), db_exc.DBDeadlock()])
with testtools.ExpectedException(exceptions.MultipleExceptions):
self._decorated_function(db_api.MAX_RETRIES + 1, e)
def test_mysql_savepoint_error(self):
e = db_exc.DBError("(pymysql.err.InternalError) (1305, u'SAVEPOINT "
"sa_savepoint_1 does not exist')")
self.assertIsNone(self._decorated_function(1, e))
@db_api.retry_if_session_inactive('alt_context')
def _alt_context_function(self, alt_context, *args, **kwargs):
return self._decorated_function(*args, **kwargs)
@db_api.retry_if_session_inactive()
def _context_function(self, context, list_arg, dict_arg,
fail_count, exc_to_raise):
list_arg.append(1)
dict_arg[max(dict_arg.keys()) + 1] = True
self.fail_count = getattr(self, 'fail_count', fail_count + 1) - 1
if self.fail_count:
raise exc_to_raise
return list_arg, dict_arg
def test_stacked_retries_dont_explode_retry_count(self):
context = mock.Mock()
context.session.is_active = False
e = db_exc.DBConnectionError()
mock.patch('time.sleep').start()
with testtools.ExpectedException(db_exc.DBConnectionError):
# after 10 failures, the inner retry should give up and
# the exception should be tagged to prevent the outer retry
self._alt_context_function(context, 11, e)
def test_retry_if_session_inactive_args_not_mutated_after_retries(self):
context = mock.Mock()
context.session.is_active = False
list_arg = [1, 2, 3, 4]
dict_arg = {1: 'a', 2: 'b'}
l, d = self._context_function(context, list_arg, dict_arg,
5, db_exc.DBDeadlock())
# even though we had 5 failures the list and dict should only
# be mutated once
self.assertEqual(5, len(l))
self.assertEqual(3, len(d))
def test_retry_if_session_inactive_kwargs_not_mutated_after_retries(self):
context = mock.Mock()
context.session.is_active = False
list_arg = [1, 2, 3, 4]
dict_arg = {1: 'a', 2: 'b'}
l, d = self._context_function(context, list_arg=list_arg,
dict_arg=dict_arg,
fail_count=5,
exc_to_raise=db_exc.DBDeadlock())
# even though we had 5 failures the list and dict should only
# be mutated once
self.assertEqual(5, len(l))
self.assertEqual(3, len(d))
def test_retry_if_session_inactive_no_retry_in_active_session(self):
context = mock.Mock()
context.session.is_active = True
with testtools.ExpectedException(db_exc.DBDeadlock):
# retry decorator should have no effect in an active session
self._context_function(context, [], {1: 2},
fail_count=1,
exc_to_raise=db_exc.DBDeadlock())
class TestCommonDBfunctions(base.BaseTestCase):
def test_set_hook(self):
with mock.patch.object(osprofiler.opts, 'is_trace_enabled',
return_value=True),\
mock.patch.object(osprofiler.opts, 'is_db_trace_enabled',
return_value=True):
with mock.patch.object(osprofiler.sqlalchemy,
'add_tracing') as add_tracing:
engine_mock = mock.Mock()
db_api.set_hook(engine_mock)
add_tracing.assert_called_with(sqlalchemy, engine_mock,
'neutron.db')


@@ -22,6 +22,7 @@ from neutron_lib import constants
from neutron_lib import context
from neutron_lib import exceptions as lib_exc
from neutron_lib.exceptions import dvr as dvr_exc
from neutron_lib import fixture
from neutron_lib.plugins import directory
from neutron_lib.utils import net
@@ -71,8 +72,8 @@ class DvrDbMixinTestCase(test_plugin.Ml2PluginV2TestCase):
def test__create_dvr_mac_address_retries_exceeded_retry_logic(self):
# limit retries so test doesn't take 40 seconds
mock.patch('neutron.db.api._retry_db_errors.max_retries',
new=2).start()
retry_fixture = fixture.DBRetryErrorsFixture(max_retries=2)
retry_fixture.setUp()
non_unique_mac = tools.get_random_EUI()
self._create_dvr_mac_entry('foo_host_1', non_unique_mac)
@@ -81,6 +82,7 @@ class DvrDbMixinTestCase(test_plugin.Ml2PluginV2TestCase):
self.assertRaises(lib_exc.HostMacAddressGenerationFailure,
self.mixin._create_dvr_mac_address,
self.ctx, "foo_host_2")
retry_fixture.cleanUp()
def test_mac_not_cleared_on_agent_delete_event_with_remaining_agents(self):
plugin = directory.get_plugin()

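The test hunks above swap direct patching of neutron.db.api's private
_retry_db_errors object for neutron-lib's DBRetryErrorsFixture. A minimal
sketch of that pattern; the test case and its body are illustrative only:

    from neutron_lib import fixture

    from neutron.tests import base


    class ExampleRetryTestCase(base.BaseTestCase):
        """Illustrative test case, not part of this change."""

        def test_example_with_capped_retries(self):
            # Cap retries so the test does not sit through the default
            # exponential backoff.
            retry_fixture = fixture.DBRetryErrorsFixture(max_retries=2)
            retry_fixture.setUp()
            self.addCleanup(retry_fixture.cleanUp)
            # ... exercise code wrapped with retry_db_errors here ...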

@@ -30,6 +30,7 @@ from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context
from neutron_lib import exceptions as exc
from neutron_lib import fixture
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api as driver_api
@@ -361,8 +362,8 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
def test_create_network_segment_allocation_fails(self):
plugin = directory.get_plugin()
mock.patch.object(db_api._retry_db_errors, 'max_retries',
new=2).start()
retry_fixture = fixture.DBRetryErrorsFixture(max_retries=2)
retry_fixture.setUp()
with mock.patch.object(
plugin.type_manager, 'create_network_segments',
side_effect=db_exc.RetryRequest(ValueError())
@@ -374,6 +375,7 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
self.assertEqual(500, res.status_int)
# 1 + retry count
self.assertEqual(3, f.call_count)
retry_fixture.cleanUp()
class TestExternalNetwork(Ml2PluginV2TestCase):