Merge "Set binding profile directly from OVNTrunkDriver (redo cont.)"

Zuul 2020-01-14 07:28:05 +00:00 committed by Gerrit Code Review
commit ab24a11f13
4 changed files with 41 additions and 66 deletions


@@ -30,7 +30,6 @@ from neutron_lib import exceptions as n_exc
 from neutron_lib.plugins import directory
 from neutron_lib.plugins.ml2 import api
 from neutron_lib.services.qos import constants as qos_consts
-from neutron_lib.services.trunk import constants as trunk_consts
 from oslo_config import cfg
 from oslo_db import exception as os_db_exc
 from oslo_log import log
@@ -755,22 +754,6 @@ class OVNMechanismDriver(api.MechanismDriver):
         # See doc/source/design/ovn_worker.rst for more details.
         return [worker.MaintenanceWorker()]
 
-    def _update_subport_host_if_needed(self, port_id):
-        parent_port = self._ovn_client.get_parent_port(port_id)
-        if parent_port:
-            admin_context = n_context.get_admin_context()
-            host_id = None
-            try:
-                port = self._plugin.get_port(admin_context, parent_port)
-                host_id = port.get(portbindings.HOST_ID, '')
-                subport = {
-                    'port': {'binding:host_id': host_id,
-                             'device_owner': trunk_consts.TRUNK_SUBPORT_OWNER}}
-                self._plugin.update_port(admin_context, port_id, subport)
-            except (os_db_exc.DBReferenceError, n_exc.PortNotFound):
-                LOG.debug("Error trying to set host_id %s for subport %s",
-                          host_id, port_id)
-
     def _update_dnat_entry_if_needed(self, port_id, up=True):
         """Update DNAT entry if using distributed floating ips."""
         if not ovn_conf.is_ovn_distributed_floating_ip():
@@ -825,11 +808,6 @@ class OVNMechanismDriver(api.MechanismDriver):
         self._update_dnat_entry_if_needed(port_id)
         self._wait_for_metadata_provisioned_if_needed(port_id)
 
-        # If this port is a subport, we need to update the host_id and set it
-        # to its parent's. Otherwise, Neutron won't even try to bind it and
-        # it will not transition from DOWN to ACTIVE.
-        self._update_subport_host_if_needed(port_id)
-
         provisioning_blocks.provisioning_complete(
             n_context.get_admin_context(),
             port_id,
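Net effect of the mech_driver changes above: when a port comes up, the
mechanism driver no longer round-trips through plugin.update_port() to copy
the parent's host onto a subport; the trunk driver (next file) now writes the
binding itself. A minimal illustrative sketch, not code from this commit, of
the PortBinding fields a trunk subport ends up with; the key names come from
the diffs, the values are placeholders:

    from neutron_lib.api.definitions import portbindings

    # Placeholder values; only the key names come from this commit.
    subport_binding = {
        'profile': {
            'parent_name': 'b53266fa-...',  # UUID of the trunk parent port
            'tag': 101,                     # subport segmentation (VLAN) id
        },
        # Written directly by OVNTrunkHandler instead of waiting for a
        # rebind triggered by the removed _update_subport_host_if_needed().
        'vif_type': portbindings.VIF_TYPE_OVS,
    }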


@@ -44,20 +44,22 @@ class OVNTrunkHandler(object):
     def _set_sub_ports(self, parent_port, subports):
         txn = self.plugin_driver._nb_ovn.transaction
         context = n_context.get_admin_context()
-        with context.session.begin(subtransactions=True), (
-                txn(check_error=True)) as ovn_txn:
-            for port in subports:
+        for port in subports:
+            with context.session.begin(subtransactions=True), (
+                    txn(check_error=True)) as ovn_txn:
                 self._set_binding_profile(context, port, parent_port, ovn_txn)
 
     def _unset_sub_ports(self, subports):
         txn = self.plugin_driver._nb_ovn.transaction
         context = n_context.get_admin_context()
-        with context.session.begin(subtransactions=True), (
-                txn(check_error=True)) as ovn_txn:
-            for port in subports:
+        for port in subports:
+            with context.session.begin(subtransactions=True), (
+                    txn(check_error=True)) as ovn_txn:
                 self._unset_binding_profile(context, port, ovn_txn)
 
     def _set_binding_profile(self, context, subport, parent_port, ovn_txn):
+        LOG.debug("Setting parent %s for subport %s",
+                  parent_port, subport.port_id)
         db_port = port_obj.Port.get_object(context, id=subport.port_id)
         if not db_port:
             LOG.debug("Port not found while trying to set "
@@ -65,15 +67,20 @@ class OVNTrunkHandler(object):
                       subport.port_id)
             return
         try:
+            # NOTE(flaviof): We expect binding's host to be set. Otherwise,
+            # sub-port will not transition from DOWN to ACTIVE.
+            db_port.device_owner = trunk_consts.TRUNK_SUBPORT_OWNER
             for binding in db_port.bindings:
-                binding.profile.update({
-                    'parent_name': parent_port,
-                    'tag': subport.segmentation_id})
+                binding.profile['parent_name'] = parent_port
+                binding.profile['tag'] = subport.segmentation_id
                 # host + port_id is primary key
                 port_obj.PortBinding.update_object(
-                    context, {'profile': binding.profile},
+                    context,
+                    {'profile': binding.profile,
+                     'vif_type': portbindings.VIF_TYPE_OVS},
                     port_id=subport.port_id,
                     host=binding.host)
+            db_port.update()
         except n_exc.ObjectNotFound:
             LOG.debug("Port not found while trying to set "
                       "binding_profile: %s", subport.port_id)
@@ -82,8 +89,11 @@ class OVNTrunkHandler(object):
             lport_name=subport.port_id,
             parent_name=parent_port,
             tag=subport.segmentation_id))
+        LOG.debug("Done setting parent %s for subport %s",
+                  parent_port, subport.port_id)
 
     def _unset_binding_profile(self, context, subport, ovn_txn):
+        LOG.debug("Unsetting parent for subport %s", subport.port_id)
         db_port = port_obj.Port.get_object(context, id=subport.port_id)
         if not db_port:
             LOG.debug("Port not found while trying to unset "
@@ -91,6 +101,7 @@ class OVNTrunkHandler(object):
                       subport.port_id)
             return
         try:
+            db_port.device_owner = ''
             for binding in db_port.bindings:
                 binding.profile.pop('tag', None)
                 binding.profile.pop('parent_name', None)
@@ -98,13 +109,12 @@ class OVNTrunkHandler(object):
                 port_obj.PortBinding.update_object(
                     context,
                     {'profile': binding.profile,
-                     'vif_type': portbindings.VIF_TYPE_UNBOUND,
-                     'vif_details': '',
-                     'host': ''},
+                     'vif_type': portbindings.VIF_TYPE_UNBOUND},
                     port_id=subport.port_id,
                     host=binding.host)
                 port_obj.PortBindingLevel.delete_objects(
                     context, port_id=subport.port_id, host=binding.host)
+            db_port.update()
         except n_exc.ObjectNotFound:
             LOG.debug("Port not found while trying to unset "
                       "binding_profile: %s", subport.port_id)
@@ -114,6 +124,7 @@ class OVNTrunkHandler(object):
             parent_name=[],
             up=False,
             tag=[]))
+        LOG.debug("Done unsetting parent for subport %s", subport.port_id)
 
     def trunk_created(self, trunk):
         if trunk.sub_ports:
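A note on the loop/transaction swap in _set_sub_ports() and _unset_sub_ports()
above: with the for loop outside the with blocks, each subport now gets its
own DB subtransaction and OVN NB transaction, so a failure on one subport no
longer rolls back or holds open the updates for its siblings, at the cost of
more, smaller transactions. A standalone sketch of the pattern, with
hypothetical names (process_all, process_one, items):

    # Hypothetical sketch of the per-item transaction scoping used above.
    def process_all(session, items, process_one):
        for item in items:
            with session.begin(subtransactions=True):
                process_one(item)  # a failure here affects only this item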


@@ -27,7 +27,6 @@ from neutron_lib import constants as const
 from neutron_lib import context
 from neutron_lib import exceptions as n_exc
 from neutron_lib.plugins import directory
-from neutron_lib.services.trunk import constants as trunk_consts
 from neutron_lib.tests import tools
 from neutron_lib.utils import net as n_net
 from oslo_config import cfg
@@ -784,9 +783,6 @@ class TestOVNMechanismDriver(test_plugin.Ml2PluginV2TestCase):
                 device_owner=port_device_owner) as port1, \
                 mock.patch.object(provisioning_blocks,
                                   'provisioning_complete') as pc, \
-                mock.patch.object(
-                    self.mech_driver,
-                    '_update_subport_host_if_needed') as upd_subport, \
                 mock.patch.object(self.mech_driver,
                                   '_update_dnat_entry_if_needed') as ude, \
                 mock.patch.object(
@@ -801,7 +797,6 @@ class TestOVNMechanismDriver(test_plugin.Ml2PluginV2TestCase):
                 resources.PORT,
                 provisioning_blocks.L2_AGENT_ENTITY
             )
-            upd_subport.assert_called_once_with(port1['port']['id'])
             ude.assert_called_once_with(port1['port']['id'])
             wmp.assert_called_once_with(port1['port']['id'])
 
@@ -894,22 +889,6 @@ class TestOVNMechanismDriver(test_plugin.Ml2PluginV2TestCase):
             )
             ude.assert_called_once_with(port1['port']['id'], False)
 
-    def test__update_subport_host_if_needed(self):
-        """Check that a subport is updated with parent's host_id."""
-        binding_host_id = {'binding:host_id': 'hostname',
-                           'device_owner': trunk_consts.TRUNK_SUBPORT_OWNER}
-        with mock.patch.object(self.mech_driver._ovn_client, 'get_parent_port',
-                               return_value='parent'), \
-                mock.patch.object(self.mech_driver._plugin, 'get_port',
-                                  return_value=binding_host_id) as get_port, \
-                mock.patch.object(self.mech_driver._plugin,
-                                  'update_port') as upd:
-            self.mech_driver._update_subport_host_if_needed('subport')
-
-        get_port.assert_called_once_with(mock.ANY, 'parent')
-        upd.assert_called_once_with(mock.ANY, 'subport',
-                                    {'port': binding_host_id})
-
     def _test__wait_for_metadata_provisioned_if_needed(self, enable_dhcp,
                                                        wait_expected):
         with self.network(tenant_id='test') as net1, \


@@ -77,6 +77,8 @@ class TestTrunkHandler(base.BaseTestCase):
         self.mock_get_port.side_effect = lambda ctxt, id: (
             self.sub_port_1_obj if id == 'sub_port_1' else (
                 self.sub_port_2_obj if id == 'sub_port_2' else None))
+        self.mock_port_update = mock.patch(
+            "neutron.objects.ports.Port.update").start()
         self.mock_update_pb = mock.patch(
             "neutron.objects.ports.PortBinding.update_object").start()
         self.mock_clear_levels = mock.patch(
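The new self.mock_port_update fixture follows the suite's existing
mock.patch(...).start() idiom. A minimal self-contained sketch of that idiom
(the test class here is hypothetical and assumes neutron is importable):

    import unittest
    from unittest import mock

    class FakeTrunkHandlerTest(unittest.TestCase):
        def setUp(self):
            # start() keeps the patch active for the whole test;
            # stopall in cleanup keeps it from leaking across tests.
            self.mock_port_update = mock.patch(
                "neutron.objects.ports.Port.update").start()
            self.addCleanup(mock.patch.stopall)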
@@ -106,10 +108,14 @@ class TestTrunkHandler(base.BaseTestCase):
         self.trunk_1.sub_ports = [self.sub_port_1, self.sub_port_2]
 
         self.handler.trunk_created(self.trunk_1)
+        calls = [mock.call(), mock.call()]
+        self._assert_calls(self.mock_port_update, calls)
+
         calls = [
             mock.call(mock.ANY,
                       {'profile': {'parent_name': trunk.port_id,
-                                   'tag': s_port.segmentation_id}},
+                                   'tag': s_port.segmentation_id},
+                       'vif_type': portbindings.VIF_TYPE_OVS},
                       host=mock.ANY,
                       port_id=s_port.port_id)
             for trunk, s_port in [(self.trunk_1, self.sub_port_1),
@@ -136,8 +142,10 @@ class TestTrunkHandler(base.BaseTestCase):
 
         self.handler.trunk_created(self.trunk_1)
         self.mock_update_pb.assert_called_once_with(
             mock.ANY, {'profile': {'parent_name': self.sub_port_1.trunk_id,
-                                   'tag': self.sub_port_1.segmentation_id}},
+                                   'tag': self.sub_port_1.segmentation_id},
+                       'vif_type': portbindings.VIF_TYPE_OVS},
             host='foo.com', port_id=self.sub_port_1.port_id)
+        self.mock_port_update.assert_not_called()
         self.plugin_driver._nb_ovn.set_lswitch_port.assert_not_called()
 
@@ -158,13 +166,14 @@ class TestTrunkHandler(base.BaseTestCase):
             'foo_field': self.sub_port_2.trunk_id})
 
         self.handler.trunk_deleted(self.trunk_1)
+        calls = [mock.call(), mock.call()]
+        self._assert_calls(self.mock_port_update, calls)
+
         calls = [
             mock.call(
                 mock.ANY,
                 {'profile': {'foo_field': s_port.trunk_id},
-                 'host': '',
-                 'vif_type': portbindings.VIF_TYPE_UNBOUND,
-                 'vif_details': ''},
+                 'vif_type': portbindings.VIF_TYPE_UNBOUND},
                 host='foo.com',
                 port_id=s_port.port_id)
             for trunk, s_port in [(self.trunk_1, self.sub_port_1),
@@ -195,9 +204,7 @@ class TestTrunkHandler(base.BaseTestCase):
         calls = [
             mock.call(mock.ANY,
                       {'profile': {'foo_field': s_port.trunk_id},
-                       'host': '',
-                       'vif_type': portbindings.VIF_TYPE_UNBOUND,
-                       'vif_details': ''},
+                       'vif_type': portbindings.VIF_TYPE_UNBOUND},
                       host='foo.com',
                       port_id=s_port.port_id)
             for trunk, s_port in [(self.trunk_1, self.sub_port_1)]]
@@ -230,9 +237,9 @@ class TestTrunkHandler(base.BaseTestCase):
 
         self.handler.trunk_deleted(self.trunk_1)
         self.mock_update_pb.assert_called_once_with(
             mock.ANY, {'profile': {},
-                       'vif_type': portbindings.VIF_TYPE_UNBOUND,
-                       'host': '', 'vif_details': ''},
+                       'vif_type': portbindings.VIF_TYPE_UNBOUND},
             host='foo.com', port_id=self.sub_port_1.port_id)
+        self.mock_port_update.assert_not_called()
         self.plugin_driver._nb_ovn.set_lswitch_port.assert_not_called()
         self.mock_clear_levels.assert_not_called()
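Taken together, these assertions pin down the two PortBinding writes the
trunk driver now performs. A hypothetical helper, not part of the commit,
restating them with field names taken from the tests above:

    from neutron_lib.api.definitions import portbindings

    def expected_binding_update(profile, parent_name=None, tag=None):
        if parent_name is not None:
            # trunk_created: parent/tag set, subport bound as an OVS port.
            return {'profile': dict(profile, parent_name=parent_name,
                                    tag=tag),
                    'vif_type': portbindings.VIF_TYPE_OVS}
        # trunk_deleted: only parent_name/tag are dropped; unlike before
        # this commit, host and vif_details are no longer blanked out.
        return {'profile': {k: v for k, v in profile.items()
                            if k not in ('parent_name', 'tag')},
                'vif_type': portbindings.VIF_TYPE_UNBOUND}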