Revert "[OVN] Prevent Trunk creation/deletion with parent port bound"

There are three reasons to revert this patch.

1. It broke the RPC push API for trunks because it added a port DB model
   to the event payload, and that model is not serializable.

2. It also broke the callback event payload interface, which requires
   that all entries in the .states attribute belong to the same core
   object (see the payload sketch after the neutron-lib quote below).

To quote from neutron-lib,

```
 # an iterable of states for the resource from the newest to the oldest
 # for example db states or api request/response
 # the actual object type for states will vary depending on event caller
 self.states = ...
```
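
To make reasons 1 and 2 concrete, the sketch below is hypothetical code
(the helper name and its arguments are made up, not taken from the tree);
it contrasts the payload shape introduced by the original patch with the
shape this revert restores, matching the trunk plugin hunk further down.
DBEventPayload is the real neutron-lib callback payload class.

```
# Illustrative sketch only: the function and its arguments are hypothetical,
# but events.DBEventPayload is the actual neutron-lib callback payload class.
from neutron_lib.callbacks import events


def build_trunk_delete_payloads(context, trunk, parent_port_db_model):
    # Pre-revert (broken): a Trunk object and a Port DB model are mixed in
    # .states; the DB model cannot be serialized for the RPC push API, and
    # .states no longer holds states of a single core object (the trunk).
    broken = events.DBEventPayload(
        context, resource_id=trunk.id,
        states=(trunk, parent_port_db_model))

    # Post-revert: only trunk states are published, which serializes cleanly
    # and honors the .states contract quoted above.
    fixed = events.DBEventPayload(
        context, resource_id=trunk.id,
        states=(trunk,))
    return broken, fixed
```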

3. There is no good justification for why ml2/ovn would not allow this
   operation. The rationale for the original patch was to align the
   behavior with ml2/ovs, but we have no such parity requirement. The 409
   error that may be returned by the API endpoints is backend-specific.

To quote api-ref,

```
409 The operation returns this error code for one of these reasons:
    A system configuration prevents the operation from succeeding.
```

AFAIU there is nothing that prevents ml2/ovn from creating a trunk in
this situation.

This will have to be backported to all supported branches (the original
patch was backported down to Wallaby).

Conflicts:
	neutron/services/trunk/drivers/ovn/trunk_driver.py

This reverts commit 833a6d82cd.

Closes-Bug: #2065707
Related-Bug: #2022059
Change-Id: I067c2f7286b2684b67b4389ca085d06a93f856ce
(cherry picked from commit ac15191f88)
Ihar Hrachyshka 2024-05-16 10:42:50 -04:00
parent 0d8cc09c4a
commit 966fa566e5
9 changed files with 18 additions and 93 deletions

View File

@@ -37,12 +37,9 @@ import eventlet
 from eventlet.green import subprocess
 import netaddr
 from neutron_lib.api.definitions import availability_zone as az_def
-from neutron_lib.api.definitions import portbindings
-from neutron_lib.api.definitions import portbindings_extended
 from neutron_lib import constants as n_const
 from neutron_lib import context as n_context
 from neutron_lib.db import api as db_api
-from neutron_lib.plugins import utils as plugin_utils
 from neutron_lib.services.qos import constants as qos_consts
 from neutron_lib.services.trunk import constants as trunk_constants
 from neutron_lib.utils import helpers
@@ -1106,16 +1103,3 @@ def parse_permitted_ethertypes(permitted_ethertypes):
             continue
 
     return ret
-
-
-# TODO(slaweq): this should be moved to neutron_lib.plugins.utils module
-def is_port_bound(port, log_message=True):
-    active_binding = plugin_utils.get_port_binding_by_status_and_host(
-        port.get('port_bindings', []), n_const.ACTIVE)
-    if not active_binding:
-        if log_message:
-            LOG.warning('Binding for port %s was not found.', port)
-        return False
-    return active_binding[portbindings_extended.VIF_TYPE] not in (
-        portbindings.VIF_TYPE_UNBOUND,
-        portbindings.VIF_TYPE_BINDING_FAILED)

View File

@@ -17,6 +17,7 @@ import netaddr
 from neutron_lib.api.definitions import external_net as extnet_apidef
 from neutron_lib.api.definitions import l3 as l3_apidef
 from neutron_lib.api.definitions import portbindings
+from neutron_lib.api.definitions import portbindings_extended
 from neutron_lib.api.definitions import router_admin_state_down_before_update
 from neutron_lib.api import validators
 from neutron_lib.callbacks import events
@@ -70,6 +71,18 @@ def is_admin_state_down_necessary():
     return _IS_ADMIN_STATE_DOWN_NECESSARY
 
 
+# TODO(slaweq): this should be moved to neutron_lib.plugins.utils module
+def is_port_bound(port):
+    active_binding = plugin_utils.get_port_binding_by_status_and_host(
+        port.get("port_bindings", []), const.ACTIVE)
+    if not active_binding:
+        LOG.warning("Binding for port %s was not found.", port)
+        return False
+    return active_binding[portbindings_extended.VIF_TYPE] not in [
+        portbindings.VIF_TYPE_UNBOUND,
+        portbindings.VIF_TYPE_BINDING_FAILED]
+
+
 @registry.has_registry_receivers
 class DVRResourceOperationHandler(object):
     """Contains callbacks for DVR operations.
@@ -1422,7 +1435,7 @@ class L3_NAT_with_dvr_db_mixin(_DVRAgentInterfaceMixin,
     def get_ports_under_dvr_connected_subnet(self, context, subnet_id):
         ports = dvr_mac_db.get_ports_query_by_subnet_and_ip(context, subnet_id)
-        ports = [p for p in ports if n_utils.is_port_bound(p)]
+        ports = [p for p in ports if is_port_bound(p)]
         # TODO(slaweq): if there would be way to pass to neutron-lib only
         # list of extensions which actually should be processed, than setting
         # process_extensions=True below could avoid that second loop and

View File

@@ -22,12 +22,10 @@ from oslo_config import cfg
 from oslo_log import log
 
 from neutron.common.ovn import constants as ovn_const
-from neutron.common import utils as n_utils
 from neutron.db import db_base_plugin_common
 from neutron.db import ovn_revision_numbers_db as db_rev
 from neutron.objects import ports as port_obj
 from neutron.services.trunk.drivers import base as trunk_base
-from neutron.services.trunk import exceptions as trunk_exc
 
 
 SUPPORTED_INTERFACES = (
@@ -157,10 +155,6 @@ class OVNTrunkHandler(object):
             LOG.debug("Done unsetting parent for subport %s", subport.port_id)
         return db_port
 
-    @staticmethod
-    def _is_port_bound(port):
-        return n_utils.is_port_bound(port, log_message=False)
-
     def trunk_created(self, resource, event, trunk_plugin, payload):
         trunk = payload.states[0]
         # Check if parent port is handled by OVN.
@@ -176,18 +170,6 @@ class OVNTrunkHandler(object):
         if trunk.sub_ports:
             self._unset_sub_ports(trunk.sub_ports)
 
-    def trunk_created_precommit(self, resource, event, trunk_plugin, payload):
-        # payload.desired_state below is the trunk object
-        parent_port = payload.desired_state.db_obj.port
-        if self._is_port_bound(parent_port):
-            raise trunk_exc.ParentPortInUse(port_id=parent_port.id)
-
-    def trunk_deleted_precommit(self, resource, event, trunk_plugin, payload):
-        trunk = payload.states[0]
-        parent_port = payload.states[1]
-        if self._is_port_bound(parent_port):
-            raise trunk_exc.TrunkInUse(trunk_id=trunk.id)
-
     def subports_added(self, resource, event, trunk_plugin, payload):
         trunk = payload.states[0]
         subports = payload.metadata['subports']
@@ -226,14 +208,6 @@ class OVNTrunkDriver(trunk_base.DriverBase):
             resource, event, trigger, payload=payload)
 
         self._handler = OVNTrunkHandler(self.plugin_driver)
-        registry.subscribe(
-            self._handler.trunk_created_precommit,
-            resources.TRUNK,
-            events.PRECOMMIT_CREATE)
-        registry.subscribe(
-            self._handler.trunk_deleted_precommit,
-            resources.TRUNK,
-            events.PRECOMMIT_DELETE)
         registry.subscribe(
             self._handler.trunk_created, resources.TRUNK, events.AFTER_CREATE)
         registry.subscribe(

View File

@@ -294,7 +294,6 @@ class TrunkPlugin(service_base.ServicePluginBase):
             trunk = self._get_trunk(context, trunk_id)
             rules.trunk_can_be_managed(context, trunk)
             trunk_port_validator = rules.TrunkPortValidator(trunk.port_id)
-            parent_port = trunk.db_obj.port
             if trunk_port_validator.can_be_trunked_or_untrunked(context):
                 # NOTE(status_police): when a trunk is deleted, the logical
                 # object disappears from the datastore, therefore there is no
@@ -308,7 +307,7 @@ class TrunkPlugin(service_base.ServicePluginBase):
                               'deleting trunk port %s: %s', trunk_id,
                               str(e))
                 payload = events.DBEventPayload(context, resource_id=trunk_id,
-                                                states=(trunk, parent_port))
+                                                states=(trunk,))
                 registry.publish(resources.TRUNK, events.PRECOMMIT_DELETE,
                                  self, payload=payload)
             else:
@@ -318,7 +317,7 @@ class TrunkPlugin(service_base.ServicePluginBase):
         registry.publish(resources.TRUNK, events.AFTER_DELETE, self,
                          payload=events.DBEventPayload(
                              context, resource_id=trunk_id,
-                             states=(trunk, parent_port)))
+                             states=(trunk,)))
 
     @db_base_plugin_common.convert_result_to_dict
     def add_subports(self, context, trunk_id, subports):

View File

@@ -14,8 +14,6 @@
 
 import contextlib
 
-from neutron_lib.api.definitions import portbindings
-from neutron_lib.callbacks import exceptions as n_exc
 from neutron_lib import constants as n_consts
 from neutron_lib.objects import registry as obj_reg
 from neutron_lib.plugins import utils
@@ -23,7 +21,6 @@ from neutron_lib.services.trunk import constants as trunk_consts
 from oslo_utils import uuidutils
 
 from neutron.common.ovn import constants as ovn_const
-from neutron.objects import ports as port_obj
 from neutron.services.trunk import plugin as trunk_plugin
 from neutron.tests.functional import base
 
@@ -108,25 +105,6 @@ class TestOVNTrunkDriver(base.TestOVNFunctionalBase):
             with self.trunk([subport]) as trunk:
                 self._verify_trunk_info(trunk, has_items=True)
 
-    def test_trunk_create_parent_port_bound(self):
-        with self.network() as network:
-            with self.subnet(network=network) as subnet:
-                with self.port(subnet=subnet) as parent_port:
-                    pb = port_obj.PortBinding.get_objects(
-                        self.context, port_id=parent_port['port']['id'])
-                    port_obj.PortBinding.update_object(
-                        self.context, {'vif_type': portbindings.VIF_TYPE_OVS},
-                        port_id=pb[0].port_id, host=pb[0].host)
-                    tenant_id = uuidutils.generate_uuid()
-                    trunk = {'trunk': {
-                        'port_id': parent_port['port']['id'],
-                        'tenant_id': tenant_id, 'project_id': tenant_id,
-                        'admin_state_up': True,
-                        'name': 'trunk', 'sub_ports': []}}
-                    self.assertRaises(n_exc.CallbackFailure,
-                                      self.trunk_plugin.create_trunk,
-                                      self.context, trunk)
-
     def test_subport_add(self):
         with self.subport() as subport:
             with self.trunk() as trunk:
@@ -149,14 +127,3 @@ class TestOVNTrunkDriver(base.TestOVNFunctionalBase):
         with self.trunk() as trunk:
             self.trunk_plugin.delete_trunk(self.context, trunk['id'])
             self._verify_trunk_info({}, has_items=False)
-
-    def test_trunk_delete_parent_port_bound(self):
-        with self.trunk() as trunk:
-            bp = port_obj.PortBinding.get_objects(
-                self.context, port_id=trunk['port_id'])
-            port_obj.PortBinding.update_object(
-                self.context, {'vif_type': portbindings.VIF_TYPE_OVS},
-                port_id=bp[0].port_id, host=bp[0].host)
-            self.assertRaises(n_exc.CallbackFailure,
-                              self.trunk_plugin.delete_trunk,
-                              self.context, trunk['id'])

View File

@@ -30,7 +30,6 @@ from neutron_lib.plugins import directory
 from neutron_lib.plugins import utils as plugin_utils
 from oslo_utils import uuidutils
 
-from neutron.common import utils as n_utils
 from neutron.db import agents_db
 from neutron.db import l3_dvr_db
 from neutron.db import l3_dvrscheduler_db
@@ -1511,7 +1510,7 @@ class L3DvrTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
         self.assertTrue(
             self.mixin.is_router_distributed(self.ctx, router_id))
 
-    @mock.patch.object(n_utils, 'is_port_bound')
+    @mock.patch.object(l3_dvr_db, "is_port_bound")
     def test_get_ports_under_dvr_connected_subnet(self, is_port_bound_mock):
         router_dict = {'name': 'test_router', 'admin_state_up': True,
                        'distributed': True}

View File

@@ -454,10 +454,6 @@ class TestTrunkDriver(base.BaseTestCase):
         with mock.patch.object(registry, 'subscribe') as mock_subscribe:
             driver.register(mock.ANY, mock.ANY, mock.Mock())
             calls = [
-                mock.call.mock_subscribe(
-                    mock.ANY, resources.TRUNK, events.PRECOMMIT_CREATE),
-                mock.call.mock_subscribe(
-                    mock.ANY, resources.TRUNK, events.PRECOMMIT_DELETE),
                 mock.call.mock_subscribe(
                     mock.ANY, resources.TRUNK, events.AFTER_CREATE),
                 mock.call.mock_subscribe(

View File

@@ -162,8 +162,7 @@ class TrunkPluginTestCase(test_plugin.Ml2PluginV2TestCase):
             resources.TRUNK, event, self.trunk_plugin, payload=mock.ANY)
         payload = callback.mock_calls[0][2]['payload']
         self.assertEqual(self.context, payload.context)
-        self.assertEqual(trunk_obj, payload.states[0])
-        self.assertEqual(parent_port['port']['id'], payload.states[1].id)
+        self.assertEqual(trunk_obj, payload.latest_state)
         self.assertEqual(trunk['id'], payload.resource_id)
 
     def test_delete_trunk_notify_after_delete(self):

View File

@@ -1,6 +0,0 @@
----
-fixes:
-  - |
-    Now the ML2/OVN trunk driver prevents a trunk creation if the parent port
-    is already bound. In the same way, if a parent port being used in a trunk
-    is bound, the trunk cannot be deleted.