
Set binding profile directly from OVNTrunkDriver (redo cont.)

This is a tweak to the changes done to fix bug 1834637. Specifically,
this change addresses scaling. The previous gerrit change performed
the modifications to all OVN sub-ports as a single transaction. That
did not account for races with concurrent Neutron DB queries, which
led to timeouts under heavy load.
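
The fix opens one short-lived Neutron DB session and one OVN NB
transaction per subport instead of wrapping the whole loop in a
single transaction. A simplified sketch of the new pattern, mirroring
the trunk_driver.py diff below:

    # One DB session + one OVN NB transaction per subport: a slow or
    # racing Neutron DB query stalls only that subport's update
    # instead of holding a long transaction over all subports.
    def _set_sub_ports(self, parent_port, subports):
        txn = self.plugin_driver._nb_ovn.transaction
        context = n_context.get_admin_context()
        for port in subports:
            with context.session.begin(subtransactions=True), (
                    txn(check_error=True)) as ovn_txn:
                self._set_binding_profile(context, port, parent_port,
                                          ovn_txn)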

Another cleanup done by this change is to fold the extra update on
the Neutron DB into the OVN trunk driver. That spares each subport
update from doing a separate database transaction, making it faster.
The function removed from mech_driver was called
_update_subport_host_if_needed.
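
The subport's device_owner and binding are now set inside the same
per-port transaction, condensed from the trunk_driver.py diff below
(no separate update_port() round trip from the mechanism driver):

    db_port.device_owner = trunk_consts.TRUNK_SUBPORT_OWNER
    for binding in db_port.bindings:
        binding.profile['parent_name'] = parent_port
        binding.profile['tag'] = subport.segmentation_id
        # host + port_id is the PortBinding primary key
        port_obj.PortBinding.update_object(
            context,
            {'profile': binding.profile,
             'vif_type': portbindings.VIF_TYPE_OVS},
            port_id=subport.port_id,
            host=binding.host)
    db_port.update()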

By breaking the iteration into multiple transactions, the change in
creation time is marginal:

  Service-level agreement
  NeutronTrunks :: neutron.create_trunk

  50%ile: from 34.2 sec to 35.6 sec
  95%ile: from 35.6 sec to 36.1 sec

This patch does not target master, which is frozen now because the
code is being merged into Neutron. The patch will be sent to Neutron
master once the code migration is finished.

Change-Id: I7de3ac2a7cf8869ead8ab5fbb34a9861a96d3a0c
Closes-Bug: #1834637
Co-authored-by: Maciej Józefczyk <mjozefcz@redhat.com>
Author: Flavio Fernandes
Tag: 7.1.0
Parent: c418dd720b

4 changed files with 41 additions and 65 deletions
  1. networking_ovn/ml2/mech_driver.py                  (+0, -21)
  2. networking_ovn/ml2/trunk_driver.py                 (+24, -13)
  3. networking_ovn/tests/unit/ml2/test_mech_driver.py  (+0, -21)
  4. networking_ovn/tests/unit/ml2/test_trunk_driver.py (+17, -10)

networking_ovn/ml2/mech_driver.py (+0, -21)

@@ -34,7 +34,6 @@ from neutron_lib import exceptions as n_exc
 from neutron_lib.plugins import directory
 from neutron_lib.plugins.ml2 import api
 from neutron_lib.services.qos import constants as qos_consts
-from neutron_lib.services.trunk import constants as trunk_consts
 from oslo_config import cfg
 from oslo_db import exception as os_db_exc
 from oslo_log import log
@@ -752,21 +751,6 @@ class OVNMechanismDriver(api.MechanismDriver):
         # See doc/source/design/ovn_worker.rst for more details.
         return [worker.MaintenanceWorker()]
 
-    def _update_subport_host_if_needed(self, port_id):
-        parent_port = self._ovn_client.get_parent_port(port_id)
-        if parent_port:
-            admin_context = n_context.get_admin_context()
-            try:
-                port = self._plugin.get_port(admin_context, parent_port)
-                host_id = port.get(portbindings.HOST_ID, '')
-                subport = {
-                    'port': {'binding:host_id': host_id,
-                             'device_owner': trunk_consts.TRUNK_SUBPORT_OWNER}}
-                self._plugin.update_port(admin_context, port_id, subport)
-            except (os_db_exc.DBReferenceError, n_exc.PortNotFound):
-                LOG.debug("Error trying to set host_id %s for subport %s",
-                          host_id, port_id)
-
     def _update_dnat_entry_if_needed(self, port_id, up=True):
         """Update DNAT entry if using distributed floating ips."""
         if not config.is_ovn_distributed_floating_ip():
@@ -821,11 +805,6 @@
         self._update_dnat_entry_if_needed(port_id)
         self._wait_for_metadata_provisioned_if_needed(port_id)
 
-        # If this port is a subport, we need to update the host_id and set it
-        # to its parent's. Otherwise, Neutron won't even try to bind it and
-        # it will not transition from DOWN to ACTIVE.
-        self._update_subport_host_if_needed(port_id)
-
         provisioning_blocks.provisioning_complete(
             n_context.get_admin_context(),
             port_id,


networking_ovn/ml2/trunk_driver.py (+24, -13)

@@ -44,20 +44,22 @@ class OVNTrunkHandler(object):
     def _set_sub_ports(self, parent_port, subports):
         txn = self.plugin_driver._nb_ovn.transaction
         context = n_context.get_admin_context()
-        with context.session.begin(subtransactions=True), (
-                txn(check_error=True)) as ovn_txn:
-            for port in subports:
+        for port in subports:
+            with context.session.begin(subtransactions=True), (
+                    txn(check_error=True)) as ovn_txn:
                 self._set_binding_profile(context, port, parent_port, ovn_txn)
 
     def _unset_sub_ports(self, subports):
         txn = self.plugin_driver._nb_ovn.transaction
         context = n_context.get_admin_context()
-        with context.session.begin(subtransactions=True), (
-                txn(check_error=True)) as ovn_txn:
-            for port in subports:
+        for port in subports:
+            with context.session.begin(subtransactions=True), (
+                    txn(check_error=True)) as ovn_txn:
                 self._unset_binding_profile(context, port, ovn_txn)
 
     def _set_binding_profile(self, context, subport, parent_port, ovn_txn):
+        LOG.debug("Setting parent %s for subport %s",
+                  parent_port, subport.port_id)
         db_port = port_obj.Port.get_object(context, id=subport.port_id)
         if not db_port:
             LOG.debug("Port not found while trying to set "
@@ -65,15 +67,20 @@
                       subport.port_id)
             return
         try:
+            # NOTE(flaviof): We expect binding's host to be set. Otherwise,
+            # sub-port will not transition from DOWN to ACTIVE.
+            db_port.device_owner = trunk_consts.TRUNK_SUBPORT_OWNER
             for binding in db_port.bindings:
-                binding.profile.update({
-                    'parent_name': parent_port,
-                    'tag': subport.segmentation_id})
+                binding.profile['parent_name'] = parent_port
+                binding.profile['tag'] = subport.segmentation_id
                 # host + port_id is primary key
                 port_obj.PortBinding.update_object(
-                    context, {'profile': binding.profile},
+                    context,
+                    {'profile': binding.profile,
+                     'vif_type': portbindings.VIF_TYPE_OVS},
                     port_id=subport.port_id,
                     host=binding.host)
+            db_port.update()
         except n_exc.ObjectNotFound:
             LOG.debug("Port not found while trying to set "
                       "binding_profile: %s", subport.port_id)
@@ -82,8 +89,11 @@
                 lport_name=subport.port_id,
                 parent_name=parent_port,
                 tag=subport.segmentation_id))
+        LOG.debug("Done setting parent %s for subport %s",
+                  parent_port, subport.port_id)
 
     def _unset_binding_profile(self, context, subport, ovn_txn):
+        LOG.debug("Unsetting parent for subport %s", subport.port_id)
         db_port = port_obj.Port.get_object(context, id=subport.port_id)
         if not db_port:
             LOG.debug("Port not found while trying to unset "
@@ -91,6 +101,7 @@
                       subport.port_id)
             return
         try:
+            db_port.device_owner = ''
             for binding in db_port.bindings:
                 binding.profile.pop('tag', None)
                 binding.profile.pop('parent_name', None)
@@ -98,13 +109,12 @@
                 port_obj.PortBinding.update_object(
                     context,
                     {'profile': binding.profile,
-                     'vif_type': portbindings.VIF_TYPE_UNBOUND,
-                     'vif_details': '',
-                     'host': ''},
+                     'vif_type': portbindings.VIF_TYPE_UNBOUND},
                     port_id=subport.port_id,
                     host=binding.host)
                 port_obj.PortBindingLevel.delete_objects(
                     context, port_id=subport.port_id, host=binding.host)
+            db_port.update()
         except n_exc.ObjectNotFound:
             LOG.debug("Port not found while trying to unset "
                       "binding_profile: %s", subport.port_id)
@@ -114,6 +124,7 @@
                 parent_name=[],
                 up=False,
                 tag=[]))
+        LOG.debug("Done unsetting parent for subport %s", subport.port_id)
 
     def trunk_created(self, trunk):
         if trunk.sub_ports:


networking_ovn/tests/unit/ml2/test_mech_driver.py (+0, -21)

@@ -35,7 +35,6 @@ from neutron_lib import constants as const
 from neutron_lib import context
 from neutron_lib import exceptions as n_exc
 from neutron_lib.plugins import directory
-from neutron_lib.services.trunk import constants as trunk_consts
 from neutron_lib.tests import tools
 from neutron_lib.utils import net as n_net
 from oslo_config import cfg
@@ -790,9 +789,6 @@ class TestOVNMechanismDriver(test_plugin.Ml2PluginV2TestCase):
                 device_owner=port_device_owner) as port1, \
                 mock.patch('neutron.db.provisioning_blocks.'
                            'provisioning_complete') as pc, \
-                mock.patch.object(
-                    self.mech_driver,
-                    '_update_subport_host_if_needed') as upd_subport, \
                 mock.patch.object(self.mech_driver,
                                   '_update_dnat_entry_if_needed') as ude, \
                 mock.patch.object(
@@ -807,7 +803,6 @@
                 resources.PORT,
                 provisioning_blocks.L2_AGENT_ENTITY
             )
-            upd_subport.assert_called_once_with(port1['port']['id'])
             ude.assert_called_once_with(port1['port']['id'])
             wmp.assert_called_once_with(port1['port']['id'])
 
@@ -900,22 +895,6 @@
             )
             ude.assert_called_once_with(port1['port']['id'], False)
 
-    def test__update_subport_host_if_needed(self):
-        """Check that a subport is updated with parent's host_id."""
-        binding_host_id = {'binding:host_id': 'hostname',
-                           'device_owner': trunk_consts.TRUNK_SUBPORT_OWNER}
-        with mock.patch.object(self.mech_driver._ovn_client, 'get_parent_port',
-                               return_value='parent'), \
-                mock.patch.object(self.mech_driver._plugin, 'get_port',
-                                  return_value=binding_host_id) as get_port, \
-                mock.patch.object(self.mech_driver._plugin,
-                                  'update_port') as upd:
-            self.mech_driver._update_subport_host_if_needed('subport')
-
-            get_port.assert_called_once_with(mock.ANY, 'parent')
-            upd.assert_called_once_with(mock.ANY, 'subport',
-                                        {'port': binding_host_id})
-
     def _test__wait_for_metadata_provisioned_if_needed(self, enable_dhcp,
                                                        wait_expected):
         with self.network(set_context=True, tenant_id='test') as net1, \


networking_ovn/tests/unit/ml2/test_trunk_driver.py (+17, -10)

@@ -79,6 +79,8 @@ class TestTrunkHandler(base.BaseTestCase):
         self.mock_get_port.side_effect = lambda ctxt, id: (
             self.sub_port_1_obj if id == 'sub_port_1' else (
                 self.sub_port_2_obj if id == 'sub_port_2' else None))
+        self.mock_port_update = mock.patch(
+            "neutron.objects.ports.Port.update").start()
         self.mock_update_pb = mock.patch(
             "neutron.objects.ports.PortBinding.update_object").start()
         self.mock_clear_levels = mock.patch(
@@ -108,10 +110,14 @@ class TestTrunkHandler(base.BaseTestCase):
         self.trunk_1.sub_ports = [self.sub_port_1, self.sub_port_2]
         self.handler.trunk_created(self.trunk_1)
 
+        calls = [mock.call(), mock.call()]
+        self._assert_calls(self.mock_port_update, calls)
+
         calls = [
             mock.call(mock.ANY,
                       {'profile': {'parent_name': trunk.port_id,
-                                   'tag': s_port.segmentation_id}},
+                                   'tag': s_port.segmentation_id},
+                       'vif_type': portbindings.VIF_TYPE_OVS},
                       host=mock.ANY,
                       port_id=s_port.port_id)
             for trunk, s_port in [(self.trunk_1, self.sub_port_1),
@@ -138,8 +144,10 @@ class TestTrunkHandler(base.BaseTestCase):
         self.handler.trunk_created(self.trunk_1)
         self.mock_update_pb.assert_called_once_with(
             mock.ANY, {'profile': {'parent_name': self.sub_port_1.trunk_id,
-                                   'tag': self.sub_port_1.segmentation_id}},
+                                   'tag': self.sub_port_1.segmentation_id},
+                       'vif_type': portbindings.VIF_TYPE_OVS},
             host='foo.com', port_id=self.sub_port_1.port_id)
+        self.mock_port_update.assert_not_called()
         self.plugin_driver._nb_ovn.set_lswitch_port.assert_not_called()
 
     def test_delete_trunk(self):
@@ -160,13 +168,14 @@ class TestTrunkHandler(base.BaseTestCase):
                                    'foo_field': self.sub_port_2.trunk_id})
         self.handler.trunk_deleted(self.trunk_1)
 
+        calls = [mock.call(), mock.call()]
+        self._assert_calls(self.mock_port_update, calls)
+
         calls = [
             mock.call(
                 mock.ANY,
                 {'profile': {'foo_field': s_port.trunk_id},
-                 'host': '',
-                 'vif_type': portbindings.VIF_TYPE_UNBOUND,
-                 'vif_details': ''},
+                 'vif_type': portbindings.VIF_TYPE_UNBOUND},
                 host='foo.com',
                 port_id=s_port.port_id)
             for trunk, s_port in [(self.trunk_1, self.sub_port_1),
@@ -197,9 +206,7 @@ class TestTrunkHandler(base.BaseTestCase):
         calls = [
             mock.call(mock.ANY,
                       {'profile': {'foo_field': s_port.trunk_id},
-                       'host': '',
-                       'vif_type': portbindings.VIF_TYPE_UNBOUND,
-                       'vif_details': ''},
+                       'vif_type': portbindings.VIF_TYPE_UNBOUND},
                       host='foo.com',
                       port_id=s_port.port_id)
             for trunk, s_port in [(self.trunk_1, self.sub_port_1)]]
@@ -232,9 +239,9 @@ class TestTrunkHandler(base.BaseTestCase):
         self.handler.trunk_deleted(self.trunk_1)
         self.mock_update_pb.assert_called_once_with(
             mock.ANY, {'profile': {},
-                       'vif_type': portbindings.VIF_TYPE_UNBOUND,
-                       'host': '', 'vif_details': ''},
+                       'vif_type': portbindings.VIF_TYPE_UNBOUND},
             host='foo.com', port_id=self.sub_port_1.port_id)
+        self.mock_port_update.assert_not_called()
         self.plugin_driver._nb_ovn.set_lswitch_port.assert_not_called()
         self.mock_clear_levels.assert_not_called()


