Deprecate get_locked_port_and_binding

This deprecates get_locked_port_and_binding and removes
its usage from ML2.

This function was misleading because it uses with_lockmode('update'),
which does not actually block concurrent updates in Galera
multi-writer deployments. A lock violation in multi-writer will
instead manifest as a deadlock error.
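
For illustration only (not part of this patch), the deprecated helper
boils down to a pair of SELECT ... FOR UPDATE queries. The sketch below
is a simplified reconstruction, not quoted from the tree; the imports
and model names follow Neutron's usual layout:

    from neutron.db import models_v2
    from neutron.plugins.ml2 import models
    from sqlalchemy.orm import exc as sa_exc

    # Simplified sketch of the deprecated helper. with_lockmode('update')
    # emits SELECT ... FOR UPDATE, which only locks rows on the local
    # Galera node; a conflicting write on another writer is not blocked
    # and only shows up later as a deadlock error.
    def get_locked_port_and_binding(context, port_id):
        try:
            port = (context.session.query(models_v2.Port).
                    filter_by(id=port_id).
                    with_lockmode('update').
                    one())
            binding = (context.session.query(models.PortBinding).
                       filter_by(port_id=port_id).
                       with_lockmode('update').
                       one())
            return port, binding
        except sa_exc.NoResultFound:
            return None, None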

The revision service plugin already protects us from concurrent
updates within a transaction via a compare-and-swap on the revision
number, which manifests as a StaleDataError when a race occurs. That
error can then be retried by the retry decorator in the same way a
deadlock in multi-writer deployments would be.
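
A minimal sketch of that compare-and-swap, assuming a revision_number
column on the port model; the helper name and the use of models_v2.Port
are illustrative, not the actual revision plugin code:

    from neutron.db import models_v2
    from sqlalchemy.orm import exc as orm_exc

    def update_port_with_cas(session, port_id, old_revision, values):
        # The UPDATE only matches while revision_number is still the
        # value we read; losing a race therefore updates zero rows.
        values = dict(values, revision_number=old_revision + 1)
        matched = (session.query(models_v2.Port).
                   filter_by(id=port_id, revision_number=old_revision).
                   update(values, synchronize_session=False))
        if not matched:
            # Surfaced as StaleDataError and retried by the DB retry
            # decorator, just like a Galera multi-writer deadlock.
            raise orm_exc.StaleDataError(
                "port %s was updated concurrently" % port_id)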

This will also help us avoid carrying this DB locking over into
Oslo Versioned Objects.

Partially-Implements: blueprint adopt-oslo-versioned-objects-for-db
Change-Id: Id9f14db607cc5c6ca02293cb7e87bc1e68301808
Kevin Benton 2017-01-20 20:10:18 -08:00 committed by Ihar Hrachyshka
parent eb203582d4
commit ffa7695038
3 changed files with 49 additions and 40 deletions

neutron/plugins/ml2/db.py

@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from debtcollector import removals
from neutron_lib import constants as n_const
from neutron_lib.plugins import directory
from oslo_db import exception as db_exc
@@ -48,6 +49,11 @@ def add_port_binding(context, port_id):
return record
@removals.remove(
message="Use get_port from inside of a transaction. The revision plugin "
"provides protection against concurrent updates to the same "
"resource with compare and swap updates of the revision_number.",
removal_version='Queens')
def get_locked_port_and_binding(context, port_id):
"""Get port and port binding records for update within transaction."""

neutron/plugins/ml2/plugin.py

@@ -441,8 +441,17 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
# Get the current port state and build a new PortContext
# reflecting this state as original state for subsequent
# mechanism driver update_port_*commit() calls.
port_db, cur_binding = db.get_locked_port_and_binding(
plugin_context, port_id)
try:
port_db = self._get_port(plugin_context, port_id)
cur_binding = port_db.port_binding
except exc.PortNotFound:
port_db, cur_binding = None, None
if not port_db or not cur_binding:
# The port has been deleted concurrently, so just
# return the unbound result from the initial
# transaction that completed before the deletion.
LOG.debug("Port %s has been deleted concurrently", port_id)
return orig_context, False, False
# Since the mechanism driver bind_port() calls must be made
# outside a DB transaction locking the port state, it is
# possible (but unlikely) that the port's state could change
@@ -452,19 +461,13 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
# used. If attributes such as binding:host_id, binding:profile,
# or binding:vnic_type are updated concurrently, the try_again
# flag is returned to indicate that the commit was unsuccessful.
if not port_db:
# The port has been deleted concurrently, so just
# return the unbound result from the initial
# transaction that completed before the deletion.
LOG.debug("Port %s has been deleted concurrently", port_id)
return orig_context, False, False
oport = self._make_port_dict(port_db)
port = self._make_port_dict(port_db)
network = bind_context.network.current
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
# REVISIT(rkukura): The PortBinding instance from the
# ml2_port_bindings table, returned as cur_binding
# from db.get_locked_port_and_binding() above, is
# from port_db.port_binding above, is
# currently not used for DVR distributed ports, and is
# replaced here with the DistributedPortBinding instance from
# the ml2_distributed_port_bindings table specific to the host
@@ -1394,8 +1397,9 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
bound_mech_contexts = []
with session.begin(subtransactions=True):
port_db, binding = db.get_locked_port_and_binding(context, id)
if not port_db:
port_db = self._get_port(context, id)
binding = port_db.port_binding
if not binding:
raise exc.PortNotFound(port_id=id)
mac_address_updated = self._check_mac_update_allowed(
port_db, attrs, binding)
@@ -1600,8 +1604,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
session = context.session
with session.begin(subtransactions=True):
port_db, binding = db.get_locked_port_and_binding(context, id)
if not port_db:
try:
port_db = self._get_port(context, id)
binding = port_db.port_binding
except exc.PortNotFound:
LOG.debug("The port '%s' was deleted", id)
return
port = self._make_port_dict(port_db)

neutron/tests/unit/plugins/ml2/test_plugin.py

@@ -1072,9 +1072,10 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
self.assertIsNone(l3plugin.disassociate_floatingips(ctx, port_id))
def test_create_port_tolerates_db_deadlock(self):
plugin = directory.get_plugin()
with self.network() as net:
with self.subnet(network=net) as subnet:
_orig = ml2_db.get_locked_port_and_binding
_orig = plugin._get_port
self._failed = False
def fail_once(*args, **kwargs):
@@ -1082,9 +1083,8 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
self._failed = True
raise db_exc.DBDeadlock()
return _orig(*args, **kwargs)
with mock.patch('neutron.plugins.ml2.plugin.'
'db.get_locked_port_and_binding',
side_effect=fail_once) as get_port_mock:
with mock.patch.object(plugin, '_get_port',
side_effect=fail_once) as get_port_mock:
port_kwargs = {portbindings.HOST_ID: 'host1',
'subnet': subnet,
'device_id': 'deadlocktest'}
@@ -1103,18 +1103,15 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
ctx = context.get_admin_context()
plugin = directory.get_plugin()
with self.port() as port:
port_db, binding = ml2_db.get_locked_port_and_binding(
ctx, port['port']['id'])
with mock.patch('neutron.plugins.ml2.plugin.'
'db.get_locked_port_and_binding') as lock:
lock.side_effect = [db_exc.DBDeadlock,
(port_db, binding)]
port_db = plugin._get_port(ctx, port['port']['id'])
with mock.patch.object(plugin, '_get_port') as gp:
gp.side_effect = [db_exc.DBDeadlock] + [port_db] * 3
req = self.new_delete_request('ports', port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(204, res.status_int)
self.assertEqual(2, lock.call_count)
self.assertRaises(
exc.PortNotFound, plugin.get_port, ctx, port['port']['id'])
self.assertGreater(gp.call_count, 1)
self.assertRaises(
exc.PortNotFound, plugin.get_port, ctx, port['port']['id'])
def test_port_create_resillient_to_duplicate_records(self):
@@ -1523,29 +1520,29 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
# create a port and delete it so we have an expired mechanism context
with self.port() as port:
plugin = directory.get_plugin()
binding = ml2_db.get_locked_port_and_binding(self.context,
port['port']['id'])[1]
binding = plugin._get_port(self.context,
port['port']['id']).port_binding
binding['host'] = 'test'
mech_context = driver_context.PortContext(
plugin, self.context, port['port'],
plugin.get_network(self.context, port['port']['network_id']),
binding, None)
with mock.patch(
'neutron.plugins.ml2.plugin.' 'db.get_locked_port_and_binding',
return_value=(None, None)) as glpab_mock,\
side = exc.PortNotFound(port_id=port['port']['id'])
with mock.patch.object(plugin, '_get_port',
side_effect=side) as gp_mock,\
mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.'
'_make_port_dict') as mpd_mock:
plugin._bind_port_if_needed(mech_context)
# called during deletion to get port
self.assertTrue(glpab_mock.mock_calls)
self.assertTrue(gp_mock.mock_calls)
# should have returned before calling _make_port_dict
self.assertFalse(mpd_mock.mock_calls)
def _create_port_and_bound_context(self, port_vif_type, bound_vif_type):
with self.port() as port:
plugin = directory.get_plugin()
binding = ml2_db.get_locked_port_and_binding(self.context,
port['port']['id'])[1]
binding = plugin._get_port(
self.context, port['port']['id']).port_binding
binding['host'] = 'fake_host'
binding['vif_type'] = port_vif_type
# Generates port context to be used before the bind.
@@ -1644,8 +1641,8 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
def test_update_port_binding_host_id_none(self):
with self.port() as port:
plugin = directory.get_plugin()
binding = ml2_db.get_locked_port_and_binding(self.context,
port['port']['id'])[1]
binding = plugin._get_port(
self.context, port['port']['id']).port_binding
binding['host'] = 'test'
mech_context = driver_context.PortContext(
plugin, self.context, port['port'],
@@ -1661,8 +1658,8 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
def test_update_port_binding_host_id_not_changed(self):
with self.port() as port:
plugin = directory.get_plugin()
binding = ml2_db.get_locked_port_and_binding(self.context,
port['port']['id'])[1]
binding = plugin._get_port(
self.context, port['port']['id']).port_binding
binding['host'] = 'test'
mech_context = driver_context.PortContext(
plugin, self.context, port['port'],
@@ -2600,8 +2597,8 @@ class TestML2Segments(Ml2PluginV2TestCase):
ml2_db.subscribe()
plugin = directory.get_plugin()
with self.port(device_owner=fake_owner_compute) as port:
binding = ml2_db.get_locked_port_and_binding(self.context,
port['port']['id'])[1]
binding = plugin._get_port(
self.context, port['port']['id']).port_binding
binding['host'] = 'host-ovs-no_filter'
mech_context = driver_context.PortContext(
plugin, self.context, port['port'],