use payloads for PORT AFTER_UPDATE events
This patch switches over to callback payloads for PORT AFTER_UPDATE events.

Change-Id: I5c00eae155afa6c0fc8e3956bc39edbeca3ea1e7
parent 9b24a62c6d
commit 129b823a8b
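For context, payload-style callbacks exchange a single events.DBEventPayload object instead of loose kwargs. A minimal sketch of the pattern this patch applies, assuming only the neutron_lib callback API already used in the diff below (the receiver names and the commented publisher variables are illustrative, not part of the patch):

from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources


# Legacy style: each receiver digs values out of loose kwargs.
def legacy_receiver(resource, event, trigger, **kwargs):
    context = kwargs['context']
    port = kwargs.get('port')
    original_port = kwargs.get('original_port')


# Payload style: one DBEventPayload carries context, states and metadata.
def payload_receiver(resource, event, trigger, payload):
    context = payload.context
    port = payload.latest_state        # post-update state
    original_port = payload.states[0]  # pre-update state
    mac_changed = payload.metadata.get('mac_address_updated')


registry.subscribe(payload_receiver, resources.PORT, events.AFTER_UPDATE)

# Publisher side: registry.publish() replaces registry.notify(**kwargs).
# (context, publisher, original_port and port would be defined by the caller.)
# registry.publish(resources.PORT, events.AFTER_UPDATE, publisher,
#                  payload=events.DBEventPayload(
#                      context, resource_id=port['id'],
#                      metadata={'mac_address_updated': False},
#                      states=(original_port, port,)))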
@@ -120,9 +120,12 @@ class DhcpAgentNotifyAPI(object):
                 registry.subscribe(
                     self._native_event_send_dhcp_notification_payload,
                     resource, events.AFTER_CREATE)
+                registry.subscribe(
+                    self._native_event_send_dhcp_notification_payload,
+                    resource, events.AFTER_UPDATE)
             else:
                 registry.subscribe(callback, resource, events.AFTER_CREATE)
-            registry.subscribe(callback, resource, events.AFTER_UPDATE)
+                registry.subscribe(callback, resource, events.AFTER_UPDATE)
             registry.subscribe(callback, resource, events.AFTER_DELETE)

     @property
@@ -1952,18 +1952,20 @@ class L3RpcNotifierMixin(object):

     @staticmethod
     @registry.receives(resources.PORT, [events.AFTER_UPDATE])
-    def _notify_gateway_port_ip_changed(resource, event, trigger, **kwargs):
+    def _notify_gateway_port_ip_changed(resource, event, trigger,
+                                        payload):
         l3plugin = directory.get_plugin(plugin_constants.L3)
         if not l3plugin:
             return
-        new_port = kwargs.get('port')
-        original_port = kwargs.get('original_port')
+        context = payload.context
+        new_port = payload.latest_state
+        original_port = payload.states[0]

         if original_port['device_owner'] != constants.DEVICE_OWNER_ROUTER_GW:
             return

         if utils.port_ip_changed(new_port, original_port):
-            l3plugin.notify_router_updated(kwargs['context'],
+            l3plugin.notify_router_updated(context,
                                            new_port['device_id'])

     @staticmethod
@@ -566,9 +566,9 @@ def _notify_port_delete(event, resource, trigger, **kwargs):
             context, info['router_id'], info['host'])


-def _notify_l3_agent_port_update(resource, event, trigger, **kwargs):
-    new_port = kwargs.get('port')
-    original_port = kwargs.get('original_port')
+def _notify_l3_agent_port_update(resource, event, trigger, payload):
+    new_port = payload.latest_state
+    original_port = payload.states[0]

     is_fixed_ips_changed = n_utils.port_ip_changed(new_port, original_port)

@@ -581,7 +581,7 @@ def _notify_l3_agent_port_update(resource, event, trigger, **kwargs):

    if new_port and original_port:
        l3plugin = directory.get_plugin(plugin_constants.L3)
-        context = kwargs['context']
+        context = payload.context
        new_port_host = new_port.get(portbindings.HOST_ID)
        original_port_host = original_port.get(portbindings.HOST_ID)
        is_new_port_binding_changed = (
@@ -670,7 +670,7 @@ def _notify_l3_agent_port_update(resource, event, trigger, **kwargs):
                l3plugin, context, original_port, address_pair)
            return

-    if kwargs.get('mac_address_updated') or is_fixed_ips_changed:
+    if payload.metadata.get('mac_address_updated') or is_fixed_ips_changed:
        l3plugin.update_arp_entry_for_dvr_service_port(
            context, new_port)

@@ -55,10 +55,10 @@ class L3_HA_scheduler_db_mixin(l3_sch_db.AZL3AgentSchedulerDbMixin):
        return self._get_agents_dict_for_router(agents)


-def _notify_l3_agent_ha_port_update(resource, event, trigger, **kwargs):
-    new_port = kwargs.get('port')
-    original_port = kwargs.get('original_port')
-    context = kwargs.get('context')
+def _notify_l3_agent_ha_port_update(resource, event, trigger, payload):
+    new_port = payload.latest_state
+    original_port = payload.states[0]
+    context = payload.context
    host = new_port.get(portbindings.HOST_ID)

    if new_port and original_port and host:
@@ -41,26 +41,28 @@ DHCP_RULE_PORT = {4: (67, 68, const.IPv4), 6: (547, 546, const.IPv6)}
 class SecurityGroupServerNotifierRpcMixin(sg_db.SecurityGroupDbMixin):
     """Mixin class to add agent-based security group implementation."""

-    @registry.receives(resources.PORT, [events.AFTER_CREATE])
-    def _notify_sg_on_port_after_update(
-            self, resource, event, trigger, payload=None):
+    @registry.receives(resources.PORT, [events.AFTER_CREATE,
+                                        events.AFTER_UPDATE])
+    def _notify_sg_on_port_after_create_and_update(
+            self, resource, event, trigger, payload):
         # TODO(boden): refact back into single method when all callbacks are
         # moved to payload style events
-        self.notify_security_groups_member_updated(
-            payload.context, payload.latest_state)
-
-    @registry.receives(resources.PORT, [events.AFTER_UPDATE,
-                                        events.AFTER_DELETE])
-    def notify_sg_on_port_change(self, resource, event, trigger, context,
-                                 port, *args, **kwargs):
-        """Trigger notification to other SG members on port changes."""
+        context = payload.context
+        port = payload.latest_state
         if event == events.AFTER_UPDATE:
-            original_port = kwargs.get('original_port')
+            original_port = payload.states[0]
             self.check_and_notify_security_group_member_changed(
                 context, original_port, port)
         else:
             self.notify_security_groups_member_updated(context, port)

+    @registry.receives(resources.PORT, [events.AFTER_DELETE])
+    def notify_sg_on_port_change(self, resource, event, trigger, context,
+                                 port, *args, **kwargs):
+        """Trigger notification to other SG members on port changes."""
+
+        self.notify_security_groups_member_updated(context, port)
+
     def create_security_group_rule(self, context, security_group_rule):
         rule = super(SecurityGroupServerNotifierRpcMixin,
                      self).create_security_group_rule(context,
@@ -82,8 +82,9 @@ class Notifier(object):

     @registry.receives(resources.PORT, [events.AFTER_UPDATE])
     def process_port_update_event(self, resource, event, trigger,
-                                  original_port=None, port=None,
-                                  **kwargs):
+                                  payload):
+        original_port = payload.states[0]
+        port = payload.latest_state
         # We only want to notify about baremetal ports.
         if not (port[portbindings_def.VNIC_TYPE] ==
                 portbindings_def.VNIC_BAREMETAL):
@@ -509,13 +509,13 @@ def _remove_data_from_external_dns_service(context, dns_driver, dns_domain,
               "ips": ', '.join(records)})


-def _update_port_in_external_dns_service(resource, event, trigger, **kwargs):
+def _update_port_in_external_dns_service(resource, event, trigger, payload):
    dns_driver = _get_dns_driver()
    if not dns_driver:
        return
-    context = kwargs['context']
-    updated_port = kwargs['port']
-    original_port = kwargs.get('original_port')
+    context = payload.context
+    updated_port = payload.latest_state
+    original_port = payload.states[0]
    if not original_port:
        return
    original_ips = _filter_by_subnet(context, original_port['fixed_ips'])
@@ -59,7 +59,8 @@ class _ObjectChangeHandler(object):
         # TODO(boden): remove shim below once all events use payloads
         if resource in self._PAYLOAD_RESOURCES:
             handler = self.handle_payload_event
-        if resource == resources.PORT and event == events.AFTER_CREATE:
+        if resource == resources.PORT and event in (events.AFTER_CREATE,
+                                                    events.AFTER_UPDATE):
             handler = self.handle_payload_event
         registry.subscribe(handler, resource, event)
         self._stop = threading.Event()
@@ -768,14 +768,14 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                 # just finished, whether that transaction committed new
                 # results or discovered concurrent port state changes.
                 # Also, Trigger notification for successful binding commit.
-                kwargs = {
-                    'context': plugin_context,
-                    'port': self._make_port_dict(port_db),  # ensure latest state
-                    'mac_address_updated': False,
-                    'original_port': oport,
-                }
-                registry.notify(resources.PORT, events.AFTER_UPDATE,
-                                self, **kwargs)
+                context = plugin_context
+                port = self._make_port_dict(port_db)  # ensure latest state
+                registry.publish(resources.PORT, events.AFTER_UPDATE, self,
+                                 payload=events.DBEventPayload(
+                                     context,
+                                     resource_id=port['id'],
+                                     metadata={'mac_address_updated': False},
+                                     states=(oport, port,)))
                 self.mechanism_manager.update_port_postcommit(cur_context)
                 need_notify = True
                 try_again = False
@@ -1807,13 +1807,13 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             bound_mech_contexts.append(mech_context)

         # Notifications must be sent after the above transaction is complete
-        kwargs = {
-            'context': context,
-            'port': updated_port,
-            'mac_address_updated': mac_address_updated,
-            'original_port': original_port,
-        }
-        registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs)
+        metadata = {'mac_address_updated': mac_address_updated}
+        registry.publish(resources.PORT, events.AFTER_UPDATE, self,
+                         payload=events.DBEventPayload(
+                             context,
+                             resource_id=id,
+                             metadata=metadata,
+                             states=(original_port, updated_port,)))

         # Note that DVR Interface ports will have bindings on
         # multiple hosts, and so will have multiple mech_contexts,
@@ -2229,10 +2229,12 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,

         if updated:
             self.mechanism_manager.update_port_postcommit(mech_context)
-            kwargs = {'context': context, 'port': mech_context.current,
-                      'original_port': original_port}
-            registry.notify(resources.PORT, events.AFTER_UPDATE, self,
-                            **kwargs)
+            port = mech_context.current
+            registry.publish(resources.PORT, events.AFTER_UPDATE, self,
+                             payload=events.DBEventPayload(
+                                 context,
+                                 resource_id=port['id'],
+                                 states=(original_port, port,)))

         if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
             db.delete_distributed_port_binding_if_stale(context, binding)
@@ -456,19 +456,20 @@ class OVNL3RouterPlugin(service_base.ServicePluginBase,

     @staticmethod
     @registry.receives(resources.PORT, [events.AFTER_UPDATE])
-    def _port_update(resource, event, trigger, **kwargs):
+    def _port_update(resource, event, trigger, payload):
         l3plugin = directory.get_plugin(plugin_constants.L3)
         if not l3plugin:
             return

-        current = kwargs['port']
+        context = payload.context
+        current = payload.latest_state

         if utils.is_lsp_router_port(current):
             # We call the update_router port with if_exists, because neutron,
             # internally creates the port, and then calls update, which will
             # trigger this callback even before we had the chance to create
             # the OVN NB DB side
-            l3plugin._ovn_client.update_router_port(kwargs['context'],
+            l3plugin._ovn_client.update_router_port(context,
                                                     current, if_exists=True)

     def get_router_availability_zones(self, router):
@@ -178,8 +178,16 @@ class PortForwardingPlugin(fip_pf.PortForwardingPluginBase):
         if exist_pf_resources:
             raise pf_exc.FipInUseByPortForwarding(id=floatingip_id)

-    @registry.receives(resources.PORT, [events.AFTER_UPDATE,
-                                        events.PRECOMMIT_DELETE])
+    @registry.receives(resources.PORT, [events.AFTER_UPDATE])
+    def _process_updated_port_request(self, resource, event, trigger,
+                                      payload):
+        # TODO(isabek): refactor back into 1 method when all code is moved
+        # to event payloads
+        return self._process_port_request(resource, event, trigger,
+                                          payload.context,
+                                          port=payload.latest_state)
+
+    @registry.receives(resources.PORT, [events.PRECOMMIT_DELETE])
     @db_api.retry_if_session_inactive()
     def _process_port_request(self, resource, event, trigger, context,
                               **kwargs):
@@ -443,10 +443,10 @@ class NovaSegmentNotifier(object):
                 segment_id, reserved=ipv4_subnets_number))

     @registry.receives(resources.PORT, [events.AFTER_UPDATE])
-    def _notify_port_updated(self, resource, event, trigger, context,
-                             **kwargs):
-        port = kwargs.get('port')
-        original_port = kwargs.get('original_port')
+    def _notify_port_updated(self, resource, event, trigger, payload):
+        context = payload.context
+        port = payload.latest_state
+        original_port = payload.states[0]
         does_original_port_require_nova_inventory_update = (
             self._does_port_require_nova_inventory_update(original_port))
         does_port_require_nova_inventory_update = (
@@ -452,15 +452,15 @@ class TrunkPlugin(service_base.ServicePluginBase):
     # AFTER_UPDATE to be problematic for setting trunk status when a
     # a parent port becomes unbound.
     @registry.receives(resources.PORT, [events.AFTER_UPDATE])
-    def _trigger_trunk_status_change(self, resource, event, trigger, **kwargs):
-        updated_port = kwargs['port']
+    def _trigger_trunk_status_change(self, resource, event, trigger, payload):
+        updated_port = payload.latest_state
         trunk_details = updated_port.get('trunk_details')
         # If no trunk_details, the port is not the parent of a trunk.
         if not trunk_details:
             return

-        context = kwargs['context']
-        original_port = kwargs['original_port']
+        context = payload.context
+        original_port = payload.states[0]
         orig_vif_type = original_port.get(portbindings.VIF_TYPE)
         new_vif_type = updated_port.get(portbindings.VIF_TYPE)
         vif_type_changed = orig_vif_type != new_vif_type
@@ -279,8 +279,15 @@ class TestDhcpAgentNotifyAPI(base.BaseTestCase):
                             context=mock.Mock(), **kwargs)
             # don't unsubscribe until all three types are observed
             self.assertEqual([], self.notifier._unsubscribed_resources)
-            registry.notify(res, events.AFTER_UPDATE, self,
-                            context=mock.Mock(), **kwargs)
+            if res == resources.PORT:
+                registry.publish(res, events.AFTER_UPDATE, self,
+                                 payload=events.DBEventPayload(
+                                     mock.Mock(), states=({},)))
+
+            else:
+                registry.notify(res, events.AFTER_UPDATE, self,
+                                context=mock.Mock(), **kwargs)
+
             self.assertEqual([], self.notifier._unsubscribed_resources)
             registry.notify(res, events.AFTER_DELETE, self,
                             context=mock.Mock(), **kwargs)
@@ -287,12 +287,11 @@ class TestL3NatBasePlugin(TestL3PluginBaseAttributes,
         new_port = super(TestL3NatBasePlugin, self).update_port(
             context, id, port)
         # Notifications must be sent after the above transaction is complete
-        kwargs = {
-            'context': context,
-            'port': new_port,
-            'original_port': original_port,
-        }
-        registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs)
+        registry.publish(resources.PORT, events.AFTER_UPDATE, self,
+                         payload=events.DBEventPayload(
+                             context,
+                             resource_id=id,
+                             states=(original_port, new_port,)))
         return new_port


@@ -17,6 +17,7 @@ from unittest import mock

 import eventlet
 from neutron_lib.api.definitions import portbindings as portbindings_def
+from neutron_lib.callbacks import events
 from neutron_lib import constants as n_const
 from openstack import connection
 from openstack import exceptions as os_exc
@@ -53,7 +54,8 @@ class TestIronicNotifier(base.BaseTestCase):
         original_port.update({'status': n_const.PORT_STATUS_DOWN})
         self.ironic_notifier.process_port_update_event(
             'fake_resource', 'fake_event', 'fake_trigger',
-            original_port=original_port, port=port, **{})
+            payload=events.DBEventPayload(
+                mock.Mock(), states=(original_port, port,)))
         mock_queue_event.assert_called_with(
             self.ironic_notifier.batch_notifier,
             {'event': 'network.bind_port',
@@ -73,7 +75,8 @@ class TestIronicNotifier(base.BaseTestCase):
         original_port.update({'status': n_const.PORT_STATUS_DOWN})
         self.ironic_notifier.process_port_update_event(
             'fake_resource', 'fake_event', 'fake_trigger',
-            original_port=original_port, port=port, **{})
+            payload=events.DBEventPayload(
+                mock.Mock(), states=(original_port, port,)))
         mock_queue_event.assert_called_with(
             self.ironic_notifier.batch_notifier,
             {'event': 'network.bind_port',
@@ -93,7 +96,8 @@ class TestIronicNotifier(base.BaseTestCase):
         original_port.update({'status': n_const.PORT_STATUS_ACTIVE})
         self.ironic_notifier.process_port_update_event(
             'fake_resource', 'fake_event', 'fake_trigger',
-            original_port=original_port, port=port, **{})
+            payload=events.DBEventPayload(
+                mock.Mock(), states=(original_port, port,)))
         mock_queue_event.assert_called_with(
             self.ironic_notifier.batch_notifier,
             {'event': 'network.unbind_port',
@@ -113,7 +117,8 @@ class TestIronicNotifier(base.BaseTestCase):
         original_port.update({'status': n_const.PORT_STATUS_ACTIVE})
         self.ironic_notifier.process_port_update_event(
             'fake_resource', 'fake_event', 'fake_trigger',
-            original_port=original_port, port=port, **{})
+            payload=events.DBEventPayload(
+                mock.Mock(), states=(original_port, port,)))
         mock_queue_event.assert_called_with(
             self.ironic_notifier.batch_notifier,
             {'event': 'network.unbind_port',
@@ -171,7 +176,8 @@ class TestIronicNotifier(base.BaseTestCase):
         original_port.update({'status': n_const.PORT_STATUS_DOWN})
         self.ironic_notifier.process_port_update_event(
             'fake_resource', 'fake_event', 'fake_trigger',
-            original_port=original_port, port=port, **{})
+            payload=events.DBEventPayload(
+                mock.Mock(), states=(original_port, port,)))

         self.assertEqual(
             2, len(self.ironic_notifier.batch_notifier._pending_events.queue))
@@ -1088,8 +1088,9 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):

     def test_port_after_update_outside_transaction(self):
         self.tx_open = True
-        receive = lambda *a, **k: setattr(self, 'tx_open',
-                                          k['context'].session.is_active)
+        receive = lambda r, e, t, payload: \
+            setattr(self, 'tx_open', payload.context.session.is_active)
+
         with self.port() as p:
             registry.subscribe(receive, resources.PORT, events.AFTER_UPDATE)
             self._update('ports', p['port']['id'],
@@ -1470,7 +1471,9 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
         b_update_events = []
         a_update_events = []
         b_receiver = lambda r, e, t, payload: b_update_events.append(payload)
-        a_receiver = lambda *a, **k: a_update_events.append(k['port'])
+        a_receiver = lambda r, e, t, payload: \
+            a_update_events.append(payload.latest_state)
+
         registry.subscribe(b_receiver, resources.PORT,
                            events.BEFORE_UPDATE)
         registry.subscribe(a_receiver, resources.PORT,
@@ -1743,7 +1746,9 @@ class TestMl2PortsV2WithRevisionPlugin(Ml2PluginV2TestCase):
                        **host_arg) as port:
             port = plugin.get_port(ctx, port['port']['id'])
             updated_ports = []
-            receiver = lambda *a, **k: updated_ports.append(k['port'])
+            receiver = lambda r, e, t, payload: \
+                updated_ports.append(payload.latest_state)
+
             registry.subscribe(receiver, resources.PORT,
                                events.AFTER_UPDATE)
             plugin.update_port_status(
@@ -1755,7 +1760,8 @@ class TestMl2PortsV2WithRevisionPlugin(Ml2PluginV2TestCase):
     def test_bind_port_bumps_revision(self):
         updated_ports = []
         created_ports = []
-        ureceiver = lambda *a, **k: updated_ports.append(k['port'])
+        ureceiver = lambda r, e, t, payload: \
+            updated_ports.append(payload.latest_state)

         def creceiver(r, e, t, payload=None):
             created_ports.append(payload.latest_state)
@@ -822,34 +822,32 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
     def test__notify_l3_agent_update_port_with_allowed_address_pairs_revert(
             self):
         port_id = uuidutils.generate_uuid()
-        kwargs = {
-            'context': self.adminContext,
-            'port': {
-                'id': port_id,
-                'admin_state_up': False,
-                portbindings.HOST_ID: 'vm-host',
-                'device_id': 'vm-id',
-                'allowed_address_pairs': [
-                    {'ip_address': '10.1.0.201',
-                     'mac_address': 'aa:bb:cc:dd:ee:ff'}],
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-            'original_port': {
-                'id': port_id,
-                'admin_state_up': True,
-                portbindings.HOST_ID: 'vm-host',
-                'device_id': 'vm-id',
-                'allowed_address_pairs': [
-                    {'ip_address': '10.1.0.201',
-                     'mac_address': 'aa:bb:cc:dd:ee:ff'}],
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-        }
-        port = kwargs.get('original_port')
+        context = self.adminContext
+        port = {
+            'id': port_id,
+            'admin_state_up': False,
+            portbindings.HOST_ID: 'vm-host',
+            'device_id': 'vm-id',
+            'allowed_address_pairs': [
+                {'ip_address': '10.1.0.201',
+                 'mac_address': 'aa:bb:cc:dd:ee:ff'}],
+            'device_owner': DEVICE_OWNER_COMPUTE, }
+        original_port = {
+            'id': port_id,
+            'admin_state_up': True,
+            portbindings.HOST_ID: 'vm-host',
+            'device_id': 'vm-id',
+            'allowed_address_pairs': [
+                {'ip_address': '10.1.0.201',
+                 'mac_address': 'aa:bb:cc:dd:ee:ff'}],
+            'device_owner': DEVICE_OWNER_COMPUTE, }

         l3plugin = mock.Mock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', mock.ANY, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, mock.ANY,
+            payload=events.DBEventPayload(
+                context, resource_id=port_id, states=(original_port, port,)))
         l3plugin._get_allowed_address_pair_fixed_ips.return_value = (
             ['10.1.0.21'])
         self.assertFalse(
@@ -857,56 +855,50 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
         l3plugin.delete_arp_entry_for_dvr_service_port.\
             assert_called_once_with(
                 self.adminContext,
-                port,
+                original_port,
                 fixed_ips_to_delete=mock.ANY)

     def test__notify_l3_agent_update_port_with_allowed_address_pairs(self):
         port_id = uuidutils.generate_uuid()
-        kwargs = {
-            'context': self.adminContext,
-            'port': {
-                'id': port_id,
-                portbindings.HOST_ID: 'vm-host',
-                'allowed_address_pairs': [
-                    {'ip_address': '10.1.0.201',
-                     'mac_address': 'aa:bb:cc:dd:ee:ff'}],
-                'device_id': 'vm-id',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'admin_state_up': True,
-            },
-            'original_port': {
-                'id': port_id,
-                portbindings.HOST_ID: 'vm-host',
-                'device_id': 'vm-id',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'admin_state_up': True,
-            },
-        }
+        context = self.adminContext
+        port = {
+            'id': port_id,
+            portbindings.HOST_ID: 'vm-host',
+            'allowed_address_pairs': [
+                {'ip_address': '10.1.0.201',
+                 'mac_address': 'aa:bb:cc:dd:ee:ff'}],
+            'device_id': 'vm-id',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            'admin_state_up': True, }
+        original_port = {
+            'id': port_id,
+            portbindings.HOST_ID: 'vm-host',
+            'device_id': 'vm-id',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            'admin_state_up': True, }
         l3plugin = mock.Mock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', mock.ANY, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, mock.ANY,
+            payload=events.DBEventPayload(
+                context, resource_id=port_id, states=(original_port, port,)))
         self.assertTrue(
             l3plugin.update_arp_entry_for_dvr_service_port.called)

     def test__notify_l3_agent_when_unbound_port_migrates_to_bound_host(self):
         port_id = 'fake-port'
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                'id': port_id,
-                portbindings.HOST_ID: '',
-                'device_owner': '',
-                'admin_state_up': True,
-            },
-            'port': {
-                'id': port_id,
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'mac_address': '02:04:05:17:18:19'
-            },
-        }
-        port = kwargs.get('port')
+        context = self.adminContext
+        original_port = {
+            'id': port_id,
+            portbindings.HOST_ID: '',
+            'device_owner': '',
+            'admin_state_up': True}
+        port = {
+            'id': port_id,
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            'mac_address': '02:04:05:17:18:19'}

         plugin = directory.get_plugin()
         l3plugin = mock.MagicMock()
         l3plugin.supported_extension_aliases = [
@@ -915,24 +907,22 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
         ]
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', plugin, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, plugin,
+            payload=events.DBEventPayload(
+                context, resource_id=port_id, states=(original_port, port,)))
         l3plugin.dvr_handle_new_service_port.assert_called_once_with(
             self.adminContext, port, unbound_migrate=True)

     def test__notify_l3_agent_update_port_no_removing_routers(self):
         port_id = 'fake-port'
-        kwargs = {
-            'context': self.adminContext,
-            'port': None,
-            'original_port': {
-                'id': port_id,
-                portbindings.HOST_ID: 'vm-host',
-                'device_id': 'vm-id',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'mac_address': '02:04:05:17:18:19'
-            },
-            'mac_address_updated': True
-        }
+        context = self.adminContext
+        port = None
+        original_port = {
+            'id': port_id,
+            portbindings.HOST_ID: 'vm-host',
+            'device_id': 'vm-id',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            'mac_address': '02:04:05:17:18:19'}

         plugin = directory.get_plugin()
         l3plugin = mock.Mock()
@@ -942,7 +932,12 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
         ]
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', plugin, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, plugin,
+            payload=events.DBEventPayload(
+                context,
+                metadata={'mac_address_updated': True},
+                resource_id=port_id,
+                states=(original_port, port,)))
         self.assertFalse(
             l3plugin.update_arp_entry_for_dvr_service_port.called)
         self.assertFalse(
@@ -979,48 +974,44 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
             l3plugin.dvr_handle_new_service_port.called)

     def test__notify_l3_agent_update_port_with_migration_port_profile(self):
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'id': uuidutils.generate_uuid()
-            },
-            'port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                portbindings.PROFILE: {'migrating_to': 'vm-host2'},
-            },
-        }
+        context = self.adminContext
+        original_port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            'id': uuidutils.generate_uuid()}
+        port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            portbindings.PROFILE: {'migrating_to': 'vm-host2'}}
         l3plugin = mock.MagicMock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         with mock.patch.object(l3plugin, '_get_floatingips_by_port_id',
                                return_value=[]):
             l3_dvrscheduler_db._notify_l3_agent_port_update(
-                'port', 'after_update', mock.ANY, **kwargs)
+                resources.PORT, events.AFTER_UPDATE, mock.ANY,
+                payload=events.DBEventPayload(
+                    context, states=(original_port, port,)))
             l3plugin.dvr_handle_new_service_port.assert_called_once_with(
-                self.adminContext, kwargs.get('port'),
+                context, port,
                 dest_host='vm-host2', router_id=None)
             l3plugin.update_arp_entry_for_dvr_service_port.\
                 assert_called_once_with(
-                    self.adminContext, kwargs.get('port'))
+                    context, port)

     def test__notify_l3_agent_update_port_no_action(self):
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-            'port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-        }
+        context = self.adminContext
+        original_port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': DEVICE_OWNER_COMPUTE}
+        port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': DEVICE_OWNER_COMPUTE}
         l3plugin = mock.Mock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', mock.ANY, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, mock.ANY,
+            payload=events.DBEventPayload(
+                context, states=(original_port, port,)))

         self.assertFalse(
             l3plugin.update_arp_entry_for_dvr_service_port.called)
@@ -1030,75 +1021,75 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
         self.assertFalse(l3plugin.get_dvr_routers_to_remove.called)

     def test__notify_l3_agent_update_port_with_mac_address_update(self):
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'mac_address': '02:04:05:17:18:19'
-            },
-            'port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'mac_address': '02:04:05:17:18:29'
-            },
-            'mac_address_updated': True
-        }
+        context = self.adminContext
+        original_port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            'mac_address': '02:04:05:17:18:19'}
+        port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            'mac_address': '02:04:05:17:18:29'}

         l3plugin = mock.Mock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', mock.ANY, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, mock.ANY,
+            payload=events.DBEventPayload(
+                context,
+                metadata={'mac_address_updated': True},
+                states=(original_port, port,)))

         l3plugin.update_arp_entry_for_dvr_service_port.\
             assert_called_once_with(
-                self.adminContext, kwargs.get('port'))
+                context, port)
         self.assertFalse(l3plugin.dvr_handle_new_service_port.called)

     def test__notify_l3_agent_update_port_with_ip_update(self):
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
-                'fixed_ips': [{'ip_address': '1.1.1.1'}],
-                'mac_address': '02:04:05:17:18:19'
-            },
-            'port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
-                'fixed_ips': [{'ip_address': '2.2.2.2'}],
-                'mac_address': '02:04:05:17:18:19'
-            },
-            'mac_address_updated': False
-        }
+        context = self.adminContext
+        original_port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
+            'fixed_ips': [{'ip_address': '1.1.1.1'}],
+            'mac_address': '02:04:05:17:18:19'
+        }
+        port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
+            'fixed_ips': [{'ip_address': '2.2.2.2'}],
+            'mac_address': '02:04:05:17:18:19'
+        }
         l3plugin = mock.Mock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', mock.ANY, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, mock.ANY,
+            payload=events.DBEventPayload(
+                context,
+                metadata={'mac_address_updated': True},
+                states=(original_port, port,)))

         l3plugin.update_arp_entry_for_dvr_service_port.\
             assert_called_once_with(
-                self.adminContext, kwargs.get('port'))
+                context, port)
         self.assertFalse(l3plugin.dvr_handle_new_service_port.called)

     def test__notify_l3_agent_update_port_without_ip_change(self):
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
-                'fixed_ips': [{'ip_address': '1.1.1.1'}],
-            },
-            'port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
-                'fixed_ips': [{'ip_address': '1.1.1.1'}],
-            },
-        }
+        context = self.adminContext
+        original_port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
+            'fixed_ips': [{'ip_address': '1.1.1.1'}],
+        }
+        port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
+            'fixed_ips': [{'ip_address': '1.1.1.1'}]}
         l3plugin = mock.Mock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', mock.ANY, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, mock.ANY,
+            payload=events.DBEventPayload(
+                context, states=(original_port, port,)))

         self.assertFalse(l3plugin.update_arp_entry_for_dvr_service_port.called)
         self.assertFalse(l3plugin.dvr_handle_new_service_port.called)
@@ -1160,18 +1151,14 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
                                               is_distributed=False,
                                               router_id=None):
         source_host = 'vm-host1'
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                'id': uuidutils.generate_uuid(),
-                portbindings.HOST_ID: source_host,
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-            'port': {
-                portbindings.HOST_ID: 'vm-host2',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-        }
+        context = self.adminContext
+        original_port = {
+            'id': uuidutils.generate_uuid(),
+            portbindings.HOST_ID: source_host,
+            'device_owner': DEVICE_OWNER_COMPUTE}
+        port = {
+            portbindings.HOST_ID: 'vm-host2',
+            'device_owner': DEVICE_OWNER_COMPUTE}
         l3plugin = mock.Mock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         with mock.patch.object(l3plugin, 'get_dvr_routers_to_remove',
@@ -1182,7 +1169,9 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
                 mock.patch.object(l3_dvr_db, 'is_distributed_router',
                                   return_value=is_distributed):
             l3_dvrscheduler_db._notify_l3_agent_port_update(
-                'port', 'after_update', mock.ANY, **kwargs)
+                resources.PORT, events.AFTER_UPDATE, mock.ANY,
+                payload=events.DBEventPayload(
+                    context, states=(original_port, port,)))
             if routers_to_remove:
                 (l3plugin.l3_rpc_notifier.router_removed_from_agent.
                  assert_called_once_with(mock.ANY, 'foo_id', source_host))
@@ -1196,28 +1185,23 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
             self.assertEqual(
                 1, l3plugin.update_arp_entry_for_dvr_service_port.call_count)
             l3plugin.dvr_handle_new_service_port.assert_called_once_with(
-                self.adminContext, kwargs.get('port'),
+                context, port,
                 dest_host=None, router_id=router_id)

     def test__notify_l3_agent_update_port_removing_routers(self):
         port_id = 'fake-port'
         source_host = 'vm-host'
-        kwargs = {
-            'context': self.adminContext,
-            'port': {
-                'id': port_id,
-                portbindings.HOST_ID: None,
-                'device_id': '',
-                'device_owner': ''
-            },
-            'mac_address_updated': False,
-            'original_port': {
-                'id': port_id,
-                portbindings.HOST_ID: source_host,
-                'device_id': 'vm-id',
-                'device_owner': DEVICE_OWNER_COMPUTE
-            }
-        }
+        context = self.adminContext
+        port = {
+            'id': port_id,
+            portbindings.HOST_ID: None,
+            'device_id': '',
+            'device_owner': ''}
+        original_port = {
+            'id': port_id,
+            portbindings.HOST_ID: source_host,
+            'device_id': 'vm-id',
+            'device_owner': DEVICE_OWNER_COMPUTE}

         plugin = directory.get_plugin()
         l3plugin = mock.Mock()
@@ -1233,7 +1217,12 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
                 mock.patch.object(l3plugin, '_get_floatingips_by_port_id',
                                   return_value=[]):
             l3_dvrscheduler_db._notify_l3_agent_port_update(
-                'port', 'after_update', plugin, **kwargs)
+                resources.PORT, events.AFTER_UPDATE, plugin,
+                payload=events.DBEventPayload(
+                    context,
+                    metadata={'mac_address_updated': True},
+                    resource_id=port_id,
+                    states=(original_port, port,)))

             self.assertEqual(
                 1, l3plugin.delete_arp_entry_for_dvr_service_port.call_count)
@@ -1377,19 +1377,22 @@ class TestOVNL3RouterPlugin(test_mech_driver.Ml2PluginV2TestCase):
     @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.'
                 'ovn_client.OVNClient.update_router_port')
     def test_port_update_postcommit(self, update_rp_mock):
-        kwargs = {'port': {'device_owner': 'foo'},
-                  'context': 'fake_context'}
+        context = 'fake_context'
+        port = {'device_owner': 'foo'}
         self.l3_inst._port_update(resources.PORT, events.AFTER_UPDATE, None,
-                                  **kwargs)
+                                  payload=events.DBEventPayload(
+                                      context,
+                                      states=(port,)))
         update_rp_mock.assert_not_called()

-        kwargs = {'port': {'device_owner': constants.DEVICE_OWNER_ROUTER_INTF},
-                  'context': 'fake_context'}
+        port = {'device_owner': constants.DEVICE_OWNER_ROUTER_INTF}
         self.l3_inst._port_update(resources.PORT, events.AFTER_UPDATE, None,
-                                  **kwargs)
+                                  payload=events.DBEventPayload(
+                                      context,
+                                      states=(port,)))

-        update_rp_mock.assert_called_once_with(kwargs['context'],
-                                               kwargs['port'],
+        update_rp_mock.assert_called_once_with(context,
+                                               port,
                                                if_exists=True)

     @mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.update_port_status')
@@ -360,11 +360,12 @@ class TrunkPluginTestCase(test_plugin.Ml2PluginV2TestCase):
         trunk_details = {'trunk_id': trunk.id}
         new_parent['trunk_details'] = trunk_details
         original_parent['trunk_details'] = trunk_details
-        kwargs = {'context': self.context, 'port': new_parent,
-                  'original_port': original_parent}
-        self.trunk_plugin._trigger_trunk_status_change(resources.PORT,
-                                                       events.AFTER_UPDATE,
-                                                       None, **kwargs)
+        self.trunk_plugin._trigger_trunk_status_change(
+            resources.PORT,
+            events.AFTER_UPDATE,
+            None,
+            payload=events.DBEventPayload(
+                self.context, states=(original_parent, new_parent)))
         current_trunk = self._get_trunk_obj(trunk.id)
         self.assertEqual(final_trunk_status, current_trunk.status)
         return trunk, current_trunk