use payloads for PORT AFTER_UPDATE events

This patch switches over to callback payloads for PORT AFTER_UPDATE
events.

Change-Id: I5c00eae155afa6c0fc8e3956bc39edbeca3ea1e7
parent 9b24a62c6d
commit 129b823a8b
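For orientation before reading the hunks below: the patch converts PORT
AFTER_UPDATE receivers from the legacy loose-kwargs signature to
neutron-lib's payload signature, and converts the publishers from
registry.notify() to registry.publish() with a DBEventPayload. A minimal
sketch of the two styles, assuming neutron-lib's callback registry (the
receiver names here are illustrative, not part of the patch):

from neutron_lib.callbacks import events, registry, resources

# Legacy style (removed by this patch): data arrives as loose kwargs.
def legacy_receiver(resource, event, trigger, **kwargs):
    context = kwargs['context']
    port = kwargs.get('port')                    # updated state
    original_port = kwargs.get('original_port')  # pre-update state

# Payload style (adopted by this patch): a single DBEventPayload carries
# the context, the (original, current) states and any metadata.
def payload_receiver(resource, event, trigger, payload):
    context = payload.context
    port = payload.latest_state        # replaces kwargs['port']
    original_port = payload.states[0]  # replaces kwargs['original_port']

registry.subscribe(payload_receiver, resources.PORT, events.AFTER_UPDATE)

# Publishers build the payload with states ordered (original, current):
# registry.publish(resources.PORT, events.AFTER_UPDATE, publisher,
#                  payload=events.DBEventPayload(
#                      context, resource_id=port_id,
#                      metadata={'mac_address_updated': False},
#                      states=(original_port, port)))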
@@ -120,9 +120,12 @@ class DhcpAgentNotifyAPI(object):
                 registry.subscribe(
                     self._native_event_send_dhcp_notification_payload,
                     resource, events.AFTER_CREATE)
+                registry.subscribe(
+                    self._native_event_send_dhcp_notification_payload,
+                    resource, events.AFTER_UPDATE)
             else:
                 registry.subscribe(callback, resource, events.AFTER_CREATE)
                 registry.subscribe(callback, resource, events.AFTER_UPDATE)
                 registry.subscribe(callback, resource, events.AFTER_DELETE)

     @property
@@ -1952,18 +1952,20 @@ class L3RpcNotifierMixin(object):

     @staticmethod
     @registry.receives(resources.PORT, [events.AFTER_UPDATE])
-    def _notify_gateway_port_ip_changed(resource, event, trigger, **kwargs):
+    def _notify_gateway_port_ip_changed(resource, event, trigger,
+                                        payload):
         l3plugin = directory.get_plugin(plugin_constants.L3)
         if not l3plugin:
             return
-        new_port = kwargs.get('port')
-        original_port = kwargs.get('original_port')
+        context = payload.context
+        new_port = payload.latest_state
+        original_port = payload.states[0]

         if original_port['device_owner'] != constants.DEVICE_OWNER_ROUTER_GW:
             return

         if utils.port_ip_changed(new_port, original_port):
-            l3plugin.notify_router_updated(kwargs['context'],
+            l3plugin.notify_router_updated(context,
                                            new_port['device_id'])

     @staticmethod
@@ -566,9 +566,9 @@ def _notify_port_delete(event, resource, trigger, **kwargs):
             context, info['router_id'], info['host'])


-def _notify_l3_agent_port_update(resource, event, trigger, **kwargs):
-    new_port = kwargs.get('port')
-    original_port = kwargs.get('original_port')
+def _notify_l3_agent_port_update(resource, event, trigger, payload):
+    new_port = payload.latest_state
+    original_port = payload.states[0]

     is_fixed_ips_changed = n_utils.port_ip_changed(new_port, original_port)

@@ -581,7 +581,7 @@ def _notify_l3_agent_port_update(resource, event, trigger, **kwargs):

     if new_port and original_port:
         l3plugin = directory.get_plugin(plugin_constants.L3)
-        context = kwargs['context']
+        context = payload.context
         new_port_host = new_port.get(portbindings.HOST_ID)
         original_port_host = original_port.get(portbindings.HOST_ID)
         is_new_port_binding_changed = (
@@ -670,7 +670,7 @@ def _notify_l3_agent_port_update(resource, event, trigger, **kwargs):
                     l3plugin, context, original_port, address_pair)
             return

-    if kwargs.get('mac_address_updated') or is_fixed_ips_changed:
+    if payload.metadata.get('mac_address_updated') or is_fixed_ips_changed:
         l3plugin.update_arp_entry_for_dvr_service_port(
             context, new_port)

@@ -55,10 +55,10 @@ class L3_HA_scheduler_db_mixin(l3_sch_db.AZL3AgentSchedulerDbMixin):
         return self._get_agents_dict_for_router(agents)


-def _notify_l3_agent_ha_port_update(resource, event, trigger, **kwargs):
-    new_port = kwargs.get('port')
-    original_port = kwargs.get('original_port')
-    context = kwargs.get('context')
+def _notify_l3_agent_ha_port_update(resource, event, trigger, payload):
+    new_port = payload.latest_state
+    original_port = payload.states[0]
+    context = payload.context
     host = new_port.get(portbindings.HOST_ID)

     if new_port and original_port and host:
@@ -41,26 +41,28 @@ DHCP_RULE_PORT = {4: (67, 68, const.IPv4), 6: (547, 546, const.IPv6)}
 class SecurityGroupServerNotifierRpcMixin(sg_db.SecurityGroupDbMixin):
     """Mixin class to add agent-based security group implementation."""

-    @registry.receives(resources.PORT, [events.AFTER_CREATE])
-    def _notify_sg_on_port_after_update(
-            self, resource, event, trigger, payload=None):
+    @registry.receives(resources.PORT, [events.AFTER_CREATE,
+                                        events.AFTER_UPDATE])
+    def _notify_sg_on_port_after_create_and_update(
+            self, resource, event, trigger, payload):
         # TODO(boden): refact back into single method when all callbacks are
         # moved to payload style events
-        self.notify_security_groups_member_updated(
-            payload.context, payload.latest_state)
-
-    @registry.receives(resources.PORT, [events.AFTER_UPDATE,
-                                        events.AFTER_DELETE])
-    def notify_sg_on_port_change(self, resource, event, trigger, context,
-                                 port, *args, **kwargs):
-        """Trigger notification to other SG members on port changes."""
+        context = payload.context
+        port = payload.latest_state
         if event == events.AFTER_UPDATE:
-            original_port = kwargs.get('original_port')
+            original_port = payload.states[0]
             self.check_and_notify_security_group_member_changed(
                 context, original_port, port)
         else:
             self.notify_security_groups_member_updated(context, port)

+    @registry.receives(resources.PORT, [events.AFTER_DELETE])
+    def notify_sg_on_port_change(self, resource, event, trigger, context,
+                                 port, *args, **kwargs):
+        """Trigger notification to other SG members on port changes."""
+        self.notify_security_groups_member_updated(context, port)
+
     def create_security_group_rule(self, context, security_group_rule):
         rule = super(SecurityGroupServerNotifierRpcMixin,
                      self).create_security_group_rule(context,
@@ -82,8 +82,9 @@ class Notifier(object):

     @registry.receives(resources.PORT, [events.AFTER_UPDATE])
     def process_port_update_event(self, resource, event, trigger,
-                                  original_port=None, port=None,
-                                  **kwargs):
+                                  payload):
+        original_port = payload.states[0]
+        port = payload.latest_state
         # We only want to notify about baremetal ports.
         if not (port[portbindings_def.VNIC_TYPE] ==
                 portbindings_def.VNIC_BAREMETAL):
@@ -509,13 +509,13 @@ def _remove_data_from_external_dns_service(context, dns_driver, dns_domain,
                  "ips": ', '.join(records)})


-def _update_port_in_external_dns_service(resource, event, trigger, **kwargs):
+def _update_port_in_external_dns_service(resource, event, trigger, payload):
     dns_driver = _get_dns_driver()
     if not dns_driver:
         return
-    context = kwargs['context']
-    updated_port = kwargs['port']
-    original_port = kwargs.get('original_port')
+    context = payload.context
+    updated_port = payload.latest_state
+    original_port = payload.states[0]
     if not original_port:
         return
     original_ips = _filter_by_subnet(context, original_port['fixed_ips'])
@@ -59,7 +59,8 @@ class _ObjectChangeHandler(object):
             # TODO(boden): remove shim below once all events use payloads
             if resource in self._PAYLOAD_RESOURCES:
                 handler = self.handle_payload_event
-            if resource == resources.PORT and event == events.AFTER_CREATE:
+            if resource == resources.PORT and event in (events.AFTER_CREATE,
+                                                        events.AFTER_UPDATE):
                 handler = self.handle_payload_event
             registry.subscribe(handler, resource, event)
         self._stop = threading.Event()
@@ -768,14 +768,14 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                 # just finished, whether that transaction committed new
                 # results or discovered concurrent port state changes.
                 # Also, Trigger notification for successful binding commit.
-                kwargs = {
-                    'context': plugin_context,
-                    'port': self._make_port_dict(port_db),  # ensure latest state
-                    'mac_address_updated': False,
-                    'original_port': oport,
-                }
-                registry.notify(resources.PORT, events.AFTER_UPDATE,
-                                self, **kwargs)
+                context = plugin_context
+                port = self._make_port_dict(port_db)  # ensure latest state
+                registry.publish(resources.PORT, events.AFTER_UPDATE, self,
+                                 payload=events.DBEventPayload(
+                                     context,
+                                     resource_id=port['id'],
+                                     metadata={'mac_address_updated': False},
+                                     states=(oport, port,)))
                 self.mechanism_manager.update_port_postcommit(cur_context)
                 need_notify = True
                 try_again = False
@@ -1807,13 +1807,13 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             bound_mech_contexts.append(mech_context)

         # Notifications must be sent after the above transaction is complete
-        kwargs = {
-            'context': context,
-            'port': updated_port,
-            'mac_address_updated': mac_address_updated,
-            'original_port': original_port,
-        }
-        registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs)
+        metadata = {'mac_address_updated': mac_address_updated}
+        registry.publish(resources.PORT, events.AFTER_UPDATE, self,
+                         payload=events.DBEventPayload(
+                             context,
+                             resource_id=id,
+                             metadata=metadata,
+                             states=(original_port, updated_port,)))

         # Note that DVR Interface ports will have bindings on
         # multiple hosts, and so will have multiple mech_contexts,
@@ -2229,10 +2229,12 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,

         if updated:
             self.mechanism_manager.update_port_postcommit(mech_context)
-            kwargs = {'context': context, 'port': mech_context.current,
-                      'original_port': original_port}
-            registry.notify(resources.PORT, events.AFTER_UPDATE, self,
-                            **kwargs)
+            port = mech_context.current
+            registry.publish(resources.PORT, events.AFTER_UPDATE, self,
+                             payload=events.DBEventPayload(
+                                 context,
+                                 resource_id=port['id'],
+                                 states=(original_port, port,)))

         if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
             db.delete_distributed_port_binding_if_stale(context, binding)
@@ -456,19 +456,20 @@ class OVNL3RouterPlugin(service_base.ServicePluginBase,

     @staticmethod
     @registry.receives(resources.PORT, [events.AFTER_UPDATE])
-    def _port_update(resource, event, trigger, **kwargs):
+    def _port_update(resource, event, trigger, payload):
         l3plugin = directory.get_plugin(plugin_constants.L3)
         if not l3plugin:
             return

-        current = kwargs['port']
+        context = payload.context
+        current = payload.latest_state

         if utils.is_lsp_router_port(current):
             # We call the update_router port with if_exists, because neutron,
             # internally creates the port, and then calls update, which will
             # trigger this callback even before we had the chance to create
             # the OVN NB DB side
-            l3plugin._ovn_client.update_router_port(kwargs['context'],
+            l3plugin._ovn_client.update_router_port(context,
                                                     current, if_exists=True)

     def get_router_availability_zones(self, router):
@@ -178,8 +178,16 @@ class PortForwardingPlugin(fip_pf.PortForwardingPluginBase):
         if exist_pf_resources:
             raise pf_exc.FipInUseByPortForwarding(id=floatingip_id)

-    @registry.receives(resources.PORT, [events.AFTER_UPDATE,
-                                        events.PRECOMMIT_DELETE])
+    @registry.receives(resources.PORT, [events.AFTER_UPDATE])
+    def _process_updated_port_request(self, resource, event, trigger,
+                                      payload):
+        # TODO(isabek): refactor back into 1 method when all code is moved
+        # to event payloads
+        return self._process_port_request(resource, event, trigger,
+                                          payload.context,
+                                          port=payload.latest_state)
+
+    @registry.receives(resources.PORT, [events.PRECOMMIT_DELETE])
     @db_api.retry_if_session_inactive()
     def _process_port_request(self, resource, event, trigger, context,
                               **kwargs):
@@ -443,10 +443,10 @@ class NovaSegmentNotifier(object):
                     segment_id, reserved=ipv4_subnets_number))

     @registry.receives(resources.PORT, [events.AFTER_UPDATE])
-    def _notify_port_updated(self, resource, event, trigger, context,
-                             **kwargs):
-        port = kwargs.get('port')
-        original_port = kwargs.get('original_port')
+    def _notify_port_updated(self, resource, event, trigger, payload):
+        context = payload.context
+        port = payload.latest_state
+        original_port = payload.states[0]
         does_original_port_require_nova_inventory_update = (
             self._does_port_require_nova_inventory_update(original_port))
         does_port_require_nova_inventory_update = (
@@ -452,15 +452,15 @@ class TrunkPlugin(service_base.ServicePluginBase):
     # AFTER_UPDATE to be problematic for setting trunk status when a
     # a parent port becomes unbound.
     @registry.receives(resources.PORT, [events.AFTER_UPDATE])
-    def _trigger_trunk_status_change(self, resource, event, trigger, **kwargs):
-        updated_port = kwargs['port']
+    def _trigger_trunk_status_change(self, resource, event, trigger, payload):
+        updated_port = payload.latest_state
         trunk_details = updated_port.get('trunk_details')
         # If no trunk_details, the port is not the parent of a trunk.
         if not trunk_details:
             return

-        context = kwargs['context']
-        original_port = kwargs['original_port']
+        context = payload.context
+        original_port = payload.states[0]
         orig_vif_type = original_port.get(portbindings.VIF_TYPE)
         new_vif_type = updated_port.get(portbindings.VIF_TYPE)
         vif_type_changed = orig_vif_type != new_vif_type
@@ -279,8 +279,15 @@ class TestDhcpAgentNotifyAPI(base.BaseTestCase):
                             context=mock.Mock(), **kwargs)
             # don't unsubscribe until all three types are observed
             self.assertEqual([], self.notifier._unsubscribed_resources)
-            registry.notify(res, events.AFTER_UPDATE, self,
-                            context=mock.Mock(), **kwargs)
+            if res == resources.PORT:
+                registry.publish(res, events.AFTER_UPDATE, self,
+                                 payload=events.DBEventPayload(
+                                     mock.Mock(), states=({},)))
+            else:
+                registry.notify(res, events.AFTER_UPDATE, self,
+                                context=mock.Mock(), **kwargs)
+
             self.assertEqual([], self.notifier._unsubscribed_resources)
             registry.notify(res, events.AFTER_DELETE, self,
                             context=mock.Mock(), **kwargs)
@@ -287,12 +287,11 @@ class TestL3NatBasePlugin(TestL3PluginBaseAttributes,
         new_port = super(TestL3NatBasePlugin, self).update_port(
             context, id, port)
         # Notifications must be sent after the above transaction is complete
-        kwargs = {
-            'context': context,
-            'port': new_port,
-            'original_port': original_port,
-        }
-        registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs)
+        registry.publish(resources.PORT, events.AFTER_UPDATE, self,
+                         payload=events.DBEventPayload(
+                             context,
+                             resource_id=id,
+                             states=(original_port, new_port,)))
         return new_port


@@ -17,6 +17,7 @@ from unittest import mock

 import eventlet
 from neutron_lib.api.definitions import portbindings as portbindings_def
+from neutron_lib.callbacks import events
 from neutron_lib import constants as n_const
 from openstack import connection
 from openstack import exceptions as os_exc
@@ -53,7 +54,8 @@ class TestIronicNotifier(base.BaseTestCase):
         original_port.update({'status': n_const.PORT_STATUS_DOWN})
         self.ironic_notifier.process_port_update_event(
             'fake_resource', 'fake_event', 'fake_trigger',
-            original_port=original_port, port=port, **{})
+            payload=events.DBEventPayload(
+                mock.Mock(), states=(original_port, port,)))
         mock_queue_event.assert_called_with(
             self.ironic_notifier.batch_notifier,
             {'event': 'network.bind_port',
@@ -73,7 +75,8 @@ class TestIronicNotifier(base.BaseTestCase):
         original_port.update({'status': n_const.PORT_STATUS_DOWN})
         self.ironic_notifier.process_port_update_event(
             'fake_resource', 'fake_event', 'fake_trigger',
-            original_port=original_port, port=port, **{})
+            payload=events.DBEventPayload(
+                mock.Mock(), states=(original_port, port,)))
         mock_queue_event.assert_called_with(
             self.ironic_notifier.batch_notifier,
             {'event': 'network.bind_port',
@@ -93,7 +96,8 @@ class TestIronicNotifier(base.BaseTestCase):
         original_port.update({'status': n_const.PORT_STATUS_ACTIVE})
         self.ironic_notifier.process_port_update_event(
             'fake_resource', 'fake_event', 'fake_trigger',
-            original_port=original_port, port=port, **{})
+            payload=events.DBEventPayload(
+                mock.Mock(), states=(original_port, port,)))
         mock_queue_event.assert_called_with(
             self.ironic_notifier.batch_notifier,
             {'event': 'network.unbind_port',
@@ -113,7 +117,8 @@ class TestIronicNotifier(base.BaseTestCase):
         original_port.update({'status': n_const.PORT_STATUS_ACTIVE})
         self.ironic_notifier.process_port_update_event(
             'fake_resource', 'fake_event', 'fake_trigger',
-            original_port=original_port, port=port, **{})
+            payload=events.DBEventPayload(
+                mock.Mock(), states=(original_port, port,)))
         mock_queue_event.assert_called_with(
             self.ironic_notifier.batch_notifier,
             {'event': 'network.unbind_port',
@@ -171,7 +176,8 @@ class TestIronicNotifier(base.BaseTestCase):
         original_port.update({'status': n_const.PORT_STATUS_DOWN})
         self.ironic_notifier.process_port_update_event(
             'fake_resource', 'fake_event', 'fake_trigger',
-            original_port=original_port, port=port, **{})
+            payload=events.DBEventPayload(
+                mock.Mock(), states=(original_port, port,)))

         self.assertEqual(
             2, len(self.ironic_notifier.batch_notifier._pending_events.queue))
@@ -1088,8 +1088,9 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):

     def test_port_after_update_outside_transaction(self):
         self.tx_open = True
-        receive = lambda *a, **k: setattr(self, 'tx_open',
-                                          k['context'].session.is_active)
+        receive = lambda r, e, t, payload: \
+            setattr(self, 'tx_open', payload.context.session.is_active)
+
         with self.port() as p:
             registry.subscribe(receive, resources.PORT, events.AFTER_UPDATE)
             self._update('ports', p['port']['id'],
@@ -1470,7 +1471,9 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
         b_update_events = []
         a_update_events = []
         b_receiver = lambda r, e, t, payload: b_update_events.append(payload)
-        a_receiver = lambda *a, **k: a_update_events.append(k['port'])
+        a_receiver = lambda r, e, t, payload: \
+            a_update_events.append(payload.latest_state)
+
         registry.subscribe(b_receiver, resources.PORT,
                            events.BEFORE_UPDATE)
         registry.subscribe(a_receiver, resources.PORT,
@@ -1743,7 +1746,9 @@ class TestMl2PortsV2WithRevisionPlugin(Ml2PluginV2TestCase):
                        **host_arg) as port:
             port = plugin.get_port(ctx, port['port']['id'])
             updated_ports = []
-            receiver = lambda *a, **k: updated_ports.append(k['port'])
+            receiver = lambda r, e, t, payload: \
+                updated_ports.append(payload.latest_state)
+
             registry.subscribe(receiver, resources.PORT,
                                events.AFTER_UPDATE)
             plugin.update_port_status(
@@ -1755,7 +1760,8 @@ class TestMl2PortsV2WithRevisionPlugin(Ml2PluginV2TestCase):
     def test_bind_port_bumps_revision(self):
         updated_ports = []
         created_ports = []
-        ureceiver = lambda *a, **k: updated_ports.append(k['port'])
+        ureceiver = lambda r, e, t, payload: \
+            updated_ports.append(payload.latest_state)

         def creceiver(r, e, t, payload=None):
             created_ports.append(payload.latest_state)
@@ -822,34 +822,32 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
     def test__notify_l3_agent_update_port_with_allowed_address_pairs_revert(
             self):
         port_id = uuidutils.generate_uuid()
-        kwargs = {
-            'context': self.adminContext,
-            'port': {
-                'id': port_id,
-                'admin_state_up': False,
-                portbindings.HOST_ID: 'vm-host',
-                'device_id': 'vm-id',
-                'allowed_address_pairs': [
-                    {'ip_address': '10.1.0.201',
-                     'mac_address': 'aa:bb:cc:dd:ee:ff'}],
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-            'original_port': {
-                'id': port_id,
-                'admin_state_up': True,
-                portbindings.HOST_ID: 'vm-host',
-                'device_id': 'vm-id',
-                'allowed_address_pairs': [
-                    {'ip_address': '10.1.0.201',
-                     'mac_address': 'aa:bb:cc:dd:ee:ff'}],
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-        }
-        port = kwargs.get('original_port')
+        context = self.adminContext
+        port = {
+            'id': port_id,
+            'admin_state_up': False,
+            portbindings.HOST_ID: 'vm-host',
+            'device_id': 'vm-id',
+            'allowed_address_pairs': [
+                {'ip_address': '10.1.0.201',
+                 'mac_address': 'aa:bb:cc:dd:ee:ff'}],
+            'device_owner': DEVICE_OWNER_COMPUTE, }
+        original_port = {
+            'id': port_id,
+            'admin_state_up': True,
+            portbindings.HOST_ID: 'vm-host',
+            'device_id': 'vm-id',
+            'allowed_address_pairs': [
+                {'ip_address': '10.1.0.201',
+                 'mac_address': 'aa:bb:cc:dd:ee:ff'}],
+            'device_owner': DEVICE_OWNER_COMPUTE, }
         l3plugin = mock.Mock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', mock.ANY, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, mock.ANY,
+            payload=events.DBEventPayload(
+                context, resource_id=port_id, states=(original_port, port,)))
         l3plugin._get_allowed_address_pair_fixed_ips.return_value = (
             ['10.1.0.21'])
         self.assertFalse(
@@ -857,56 +855,50 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
         l3plugin.delete_arp_entry_for_dvr_service_port.\
             assert_called_once_with(
                 self.adminContext,
-                port,
+                original_port,
                 fixed_ips_to_delete=mock.ANY)

     def test__notify_l3_agent_update_port_with_allowed_address_pairs(self):
         port_id = uuidutils.generate_uuid()
-        kwargs = {
-            'context': self.adminContext,
-            'port': {
-                'id': port_id,
-                portbindings.HOST_ID: 'vm-host',
-                'allowed_address_pairs': [
-                    {'ip_address': '10.1.0.201',
-                     'mac_address': 'aa:bb:cc:dd:ee:ff'}],
-                'device_id': 'vm-id',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'admin_state_up': True,
-            },
-            'original_port': {
-                'id': port_id,
-                portbindings.HOST_ID: 'vm-host',
-                'device_id': 'vm-id',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'admin_state_up': True,
-            },
-        }
+        context = self.adminContext
+        port = {
+            'id': port_id,
+            portbindings.HOST_ID: 'vm-host',
+            'allowed_address_pairs': [
+                {'ip_address': '10.1.0.201',
+                 'mac_address': 'aa:bb:cc:dd:ee:ff'}],
+            'device_id': 'vm-id',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            'admin_state_up': True, }
+        original_port = {
+            'id': port_id,
+            portbindings.HOST_ID: 'vm-host',
+            'device_id': 'vm-id',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            'admin_state_up': True, }
         l3plugin = mock.Mock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', mock.ANY, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, mock.ANY,
+            payload=events.DBEventPayload(
+                context, resource_id=port_id, states=(original_port, port,)))
         self.assertTrue(
             l3plugin.update_arp_entry_for_dvr_service_port.called)

     def test__notify_l3_agent_when_unbound_port_migrates_to_bound_host(self):
         port_id = 'fake-port'
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                'id': port_id,
-                portbindings.HOST_ID: '',
-                'device_owner': '',
-                'admin_state_up': True,
-            },
-            'port': {
-                'id': port_id,
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'mac_address': '02:04:05:17:18:19'
-            },
-        }
-        port = kwargs.get('port')
+        context = self.adminContext
+        original_port = {
+            'id': port_id,
+            portbindings.HOST_ID: '',
+            'device_owner': '',
+            'admin_state_up': True}
+        port = {
+            'id': port_id,
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            'mac_address': '02:04:05:17:18:19'}
         plugin = directory.get_plugin()
         l3plugin = mock.MagicMock()
         l3plugin.supported_extension_aliases = [
@@ -915,24 +907,22 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
         ]
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', plugin, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, plugin,
+            payload=events.DBEventPayload(
+                context, resource_id=port_id, states=(original_port, port,)))
         l3plugin.dvr_handle_new_service_port.assert_called_once_with(
             self.adminContext, port, unbound_migrate=True)

     def test__notify_l3_agent_update_port_no_removing_routers(self):
         port_id = 'fake-port'
-        kwargs = {
-            'context': self.adminContext,
-            'port': None,
-            'original_port': {
-                'id': port_id,
-                portbindings.HOST_ID: 'vm-host',
-                'device_id': 'vm-id',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'mac_address': '02:04:05:17:18:19'
-            },
-            'mac_address_updated': True
-        }
+        context = self.adminContext
+        port = None
+        original_port = {
+            'id': port_id,
+            portbindings.HOST_ID: 'vm-host',
+            'device_id': 'vm-id',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            'mac_address': '02:04:05:17:18:19'}

         plugin = directory.get_plugin()
         l3plugin = mock.Mock()
@@ -942,7 +932,12 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
         ]
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', plugin, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, plugin,
+            payload=events.DBEventPayload(
+                context,
+                metadata={'mac_address_updated': True},
+                resource_id=port_id,
+                states=(original_port, port,)))
         self.assertFalse(
             l3plugin.update_arp_entry_for_dvr_service_port.called)
         self.assertFalse(
@@ -979,48 +974,44 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
             l3plugin.dvr_handle_new_service_port.called)

     def test__notify_l3_agent_update_port_with_migration_port_profile(self):
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'id': uuidutils.generate_uuid()
-            },
-            'port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                portbindings.PROFILE: {'migrating_to': 'vm-host2'},
-            },
-        }
+        context = self.adminContext
+        original_port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            'id': uuidutils.generate_uuid()}
+        port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            portbindings.PROFILE: {'migrating_to': 'vm-host2'}}
         l3plugin = mock.MagicMock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         with mock.patch.object(l3plugin, '_get_floatingips_by_port_id',
                                return_value=[]):
             l3_dvrscheduler_db._notify_l3_agent_port_update(
-                'port', 'after_update', mock.ANY, **kwargs)
+                resources.PORT, events.AFTER_UPDATE, mock.ANY,
+                payload=events.DBEventPayload(
+                    context, states=(original_port, port,)))
             l3plugin.dvr_handle_new_service_port.assert_called_once_with(
-                self.adminContext, kwargs.get('port'),
+                context, port,
                 dest_host='vm-host2', router_id=None)
             l3plugin.update_arp_entry_for_dvr_service_port.\
                 assert_called_once_with(
-                    self.adminContext, kwargs.get('port'))
+                    context, port)

     def test__notify_l3_agent_update_port_no_action(self):
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-            'port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-        }
+        context = self.adminContext
+        original_port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': DEVICE_OWNER_COMPUTE}
+        port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': DEVICE_OWNER_COMPUTE}
         l3plugin = mock.Mock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', mock.ANY, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, mock.ANY,
+            payload=events.DBEventPayload(
+                context, states=(original_port, port,)))

         self.assertFalse(
             l3plugin.update_arp_entry_for_dvr_service_port.called)
@@ -1030,75 +1021,75 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
         self.assertFalse(l3plugin.get_dvr_routers_to_remove.called)

     def test__notify_l3_agent_update_port_with_mac_address_update(self):
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'mac_address': '02:04:05:17:18:19'
-            },
-            'port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-                'mac_address': '02:04:05:17:18:29'
-            },
-            'mac_address_updated': True
-        }
+        context = self.adminContext
+        original_port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            'mac_address': '02:04:05:17:18:19'}
+        port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': DEVICE_OWNER_COMPUTE,
+            'mac_address': '02:04:05:17:18:29'}
         l3plugin = mock.Mock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', mock.ANY, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, mock.ANY,
+            payload=events.DBEventPayload(
+                context,
+                metadata={'mac_address_updated': True},
+                states=(original_port, port,)))

         l3plugin.update_arp_entry_for_dvr_service_port.\
             assert_called_once_with(
-                self.adminContext, kwargs.get('port'))
+                context, port)
         self.assertFalse(l3plugin.dvr_handle_new_service_port.called)

     def test__notify_l3_agent_update_port_with_ip_update(self):
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
-                'fixed_ips': [{'ip_address': '1.1.1.1'}],
-                'mac_address': '02:04:05:17:18:19'
-            },
-            'port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
-                'fixed_ips': [{'ip_address': '2.2.2.2'}],
-                'mac_address': '02:04:05:17:18:19'
-            },
-            'mac_address_updated': False
+        context = self.adminContext
+        original_port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
+            'fixed_ips': [{'ip_address': '1.1.1.1'}],
+            'mac_address': '02:04:05:17:18:19'
+        }
+        port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
+            'fixed_ips': [{'ip_address': '2.2.2.2'}],
+            'mac_address': '02:04:05:17:18:19'
         }
         l3plugin = mock.Mock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', mock.ANY, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, mock.ANY,
+            payload=events.DBEventPayload(
+                context,
+                metadata={'mac_address_updated': True},
+                states=(original_port, port,)))

         l3plugin.update_arp_entry_for_dvr_service_port.\
             assert_called_once_with(
-                self.adminContext, kwargs.get('port'))
+                context, port)
         self.assertFalse(l3plugin.dvr_handle_new_service_port.called)

     def test__notify_l3_agent_update_port_without_ip_change(self):
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
-                'fixed_ips': [{'ip_address': '1.1.1.1'}],
-            },
-            'port': {
-                portbindings.HOST_ID: 'vm-host',
-                'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
-                'fixed_ips': [{'ip_address': '1.1.1.1'}],
-            },
+        context = self.adminContext
+        original_port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
+            'fixed_ips': [{'ip_address': '1.1.1.1'}],
         }
+        port = {
+            portbindings.HOST_ID: 'vm-host',
+            'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
+            'fixed_ips': [{'ip_address': '1.1.1.1'}]}
         l3plugin = mock.Mock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         l3_dvrscheduler_db._notify_l3_agent_port_update(
-            'port', 'after_update', mock.ANY, **kwargs)
+            resources.PORT, events.AFTER_UPDATE, mock.ANY,
+            payload=events.DBEventPayload(
+                context, states=(original_port, port,)))

         self.assertFalse(l3plugin.update_arp_entry_for_dvr_service_port.called)
         self.assertFalse(l3plugin.dvr_handle_new_service_port.called)
@@ -1160,18 +1151,14 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
             is_distributed=False,
             router_id=None):
         source_host = 'vm-host1'
-        kwargs = {
-            'context': self.adminContext,
-            'original_port': {
-                'id': uuidutils.generate_uuid(),
-                portbindings.HOST_ID: source_host,
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-            'port': {
-                portbindings.HOST_ID: 'vm-host2',
-                'device_owner': DEVICE_OWNER_COMPUTE,
-            },
-        }
+        context = self.adminContext
+        original_port = {
+            'id': uuidutils.generate_uuid(),
+            portbindings.HOST_ID: source_host,
+            'device_owner': DEVICE_OWNER_COMPUTE}
+        port = {
+            portbindings.HOST_ID: 'vm-host2',
+            'device_owner': DEVICE_OWNER_COMPUTE}
         l3plugin = mock.Mock()
         directory.add_plugin(plugin_constants.L3, l3plugin)
         with mock.patch.object(l3plugin, 'get_dvr_routers_to_remove',
@@ -1182,7 +1169,9 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
                 mock.patch.object(l3_dvr_db, 'is_distributed_router',
                                   return_value=is_distributed):
             l3_dvrscheduler_db._notify_l3_agent_port_update(
-                'port', 'after_update', mock.ANY, **kwargs)
+                resources.PORT, events.AFTER_UPDATE, mock.ANY,
+                payload=events.DBEventPayload(
+                    context, states=(original_port, port,)))
             if routers_to_remove:
                 (l3plugin.l3_rpc_notifier.router_removed_from_agent.
                  assert_called_once_with(mock.ANY, 'foo_id', source_host))
@@ -1196,28 +1185,23 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
             self.assertEqual(
                 1, l3plugin.update_arp_entry_for_dvr_service_port.call_count)
             l3plugin.dvr_handle_new_service_port.assert_called_once_with(
-                self.adminContext, kwargs.get('port'),
+                context, port,
                 dest_host=None, router_id=router_id)

     def test__notify_l3_agent_update_port_removing_routers(self):
         port_id = 'fake-port'
         source_host = 'vm-host'
-        kwargs = {
-            'context': self.adminContext,
-            'port': {
-                'id': port_id,
-                portbindings.HOST_ID: None,
-                'device_id': '',
-                'device_owner': ''
-            },
-            'mac_address_updated': False,
-            'original_port': {
-                'id': port_id,
-                portbindings.HOST_ID: source_host,
-                'device_id': 'vm-id',
-                'device_owner': DEVICE_OWNER_COMPUTE
-            }
-        }
+        context = self.adminContext
+        port = {
+            'id': port_id,
+            portbindings.HOST_ID: None,
+            'device_id': '',
+            'device_owner': ''}
+        original_port = {
+            'id': port_id,
+            portbindings.HOST_ID: source_host,
+            'device_id': 'vm-id',
+            'device_owner': DEVICE_OWNER_COMPUTE}

         plugin = directory.get_plugin()
         l3plugin = mock.Mock()
@@ -1233,7 +1217,12 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
                 mock.patch.object(l3plugin, '_get_floatingips_by_port_id',
                                   return_value=[]):
             l3_dvrscheduler_db._notify_l3_agent_port_update(
-                'port', 'after_update', plugin, **kwargs)
+                resources.PORT, events.AFTER_UPDATE, plugin,
+                payload=events.DBEventPayload(
+                    context,
+                    metadata={'mac_address_updated': True},
+                    resource_id=port_id,
+                    states=(original_port, port,)))

             self.assertEqual(
                 1, l3plugin.delete_arp_entry_for_dvr_service_port.call_count)
@@ -1377,19 +1377,22 @@ class TestOVNL3RouterPlugin(test_mech_driver.Ml2PluginV2TestCase):
     @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.'
                 'ovn_client.OVNClient.update_router_port')
     def test_port_update_postcommit(self, update_rp_mock):
-        kwargs = {'port': {'device_owner': 'foo'},
-                  'context': 'fake_context'}
+        context = 'fake_context'
+        port = {'device_owner': 'foo'}
         self.l3_inst._port_update(resources.PORT, events.AFTER_UPDATE, None,
-                                  **kwargs)
+                                  payload=events.DBEventPayload(
+                                      context,
+                                      states=(port,)))
         update_rp_mock.assert_not_called()

-        kwargs = {'port': {'device_owner': constants.DEVICE_OWNER_ROUTER_INTF},
-                  'context': 'fake_context'}
+        port = {'device_owner': constants.DEVICE_OWNER_ROUTER_INTF}
         self.l3_inst._port_update(resources.PORT, events.AFTER_UPDATE, None,
-                                  **kwargs)
+                                  payload=events.DBEventPayload(
+                                      context,
+                                      states=(port,)))

-        update_rp_mock.assert_called_once_with(kwargs['context'],
-                                               kwargs['port'],
+        update_rp_mock.assert_called_once_with(context,
+                                               port,
                                                if_exists=True)

     @mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.update_port_status')
@@ -360,11 +360,12 @@ class TrunkPluginTestCase(test_plugin.Ml2PluginV2TestCase):
         trunk_details = {'trunk_id': trunk.id}
         new_parent['trunk_details'] = trunk_details
         original_parent['trunk_details'] = trunk_details
-        kwargs = {'context': self.context, 'port': new_parent,
-                  'original_port': original_parent}
-        self.trunk_plugin._trigger_trunk_status_change(resources.PORT,
-                                                       events.AFTER_UPDATE,
-                                                       None, **kwargs)
+        self.trunk_plugin._trigger_trunk_status_change(
+            resources.PORT,
+            events.AFTER_UPDATE,
+            None,
+            payload=events.DBEventPayload(
+                self.context, states=(original_parent, new_parent)))
         current_trunk = self._get_trunk_obj(trunk.id)
         self.assertEqual(final_trunk_status, current_trunk.status)
         return trunk, current_trunk