Merge "Revert "Use Port_Binding up column to set Neutron port status"" into stable/ussuri
@@ -287,10 +287,6 @@ LSP_OPTIONS_MCAST_FLOOD = 'mcast_flood'

LRP_OPTIONS_RESIDE_REDIR_CH = 'reside-on-redirect-chassis'

# Port Binding types
PB_TYPE_PATCH = 'patch'
PB_TYPE_VIRTUAL = 'virtual'

HA_CHASSIS_GROUP_DEFAULT_NAME = 'default_ha_chassis_group'
HA_CHASSIS_GROUP_HIGHEST_PRIORITY = 32767
@@ -173,6 +173,47 @@ class ChassisEvent(row_event.RowEvent):
        self.handle_ha_chassis_group_changes(event, row, old)


class PortBindingChassisUpdateEvent(row_event.RowEvent):
    """Event for matching a port moving chassis

    If the LSP is up and the Port_Binding chassis has just changed,
    there is a good chance the host died without cleaning up the chassis
    column on the Port_Binding. The port never goes down, so we won't
    see update the driver with the LogicalSwitchPortUpdateUpEvent which
    only monitors for transitions from DOWN to UP.
    """

    def __init__(self, driver):
        self.driver = driver
        table = 'Port_Binding'
        events = (self.ROW_UPDATE,)
        super(PortBindingChassisUpdateEvent, self).__init__(
            events, table, None)
        self.event_name = self.__class__.__name__

    def match_fn(self, event, row, old=None):
        # NOTE(twilson) ROW_UPDATE events always pass old, but chassis will
        # only be set if chassis has changed
        old_chassis = getattr(old, 'chassis', None)
        if not (row.chassis and old_chassis) or row.chassis == old_chassis:
            return False
        if row.type == ovn_const.OVN_CHASSIS_REDIRECT:
            return False
        try:
            lsp = self.driver.nb_ovn.lookup('Logical_Switch_Port',
                                            row.logical_port)
        except idlutils.RowNotFound:
            LOG.warning("Logical Switch Port %(port)s not found for "
                        "Port_Binding %(binding)s",
                        {'port': row.logical_port, 'binding': row.uuid})
            return False

        return bool(lsp.up)

    def run(self, event, row, old=None):
        self.driver.set_port_status_up(row.logical_port)


class PortBindingChassisEvent(row_event.RowEvent):
    """Port_Binding update event - set chassis for chassisredirect port.
@@ -206,8 +247,8 @@ class PortBindingChassisEvent(row_event.RowEvent):
                                                  router, host)


class PortBindingCreateUpEvent(row_event.RowEvent):
    """Row create event - Port_Binding 'up' = True.
class LogicalSwitchPortCreateUpEvent(row_event.RowEvent):
    """Row create event - Logical_Switch_Port 'up' = True.

    On connection, we get a dump of all ports, so if there is a neutron
    port that is down that has since been activated, we'll catch it here.
@@ -216,200 +257,73 @@ class PortBindingCreateUpEvent(row_event.RowEvent):

    def __init__(self, driver):
        self.driver = driver
        table = 'Port_Binding'
        table = 'Logical_Switch_Port'
        events = (self.ROW_CREATE,)
        super(PortBindingCreateUpEvent, self).__init__(events, table, None)
        self.event_name = 'PortBindingCreateUpEvent'

    def match_fn(self, event, row, old):
        if row.type in (ovn_const.PB_TYPE_VIRTUAL,
                        ovn_const.OVN_CHASSIS_REDIRECT):
            # NOTE(ltomasbo): Skipping virtual ports as they are not being
            # set to ACTIVE
            # NOTE(ltomasbo): No need to handle cr ports
            return False
        if row.type == ovn_const.PB_TYPE_PATCH:
            # NOTE(ltomasbo): Only handle the logical_switch_port side,
            # not the router side.
            if (row.logical_port.startswith('lrp-') or
                    row.logical_port.startswith('cr-lrp')):
                return False
            return True
        # TODO(ltomasbo): Remove the checkings for 'up' column once minimal
        # ovn version has it (v21.03.0). The match_fn can be then replaced
        # by different init method above:
        # super().__init__(
        #     events, table, (('up', '=', True), ('type', '=', ''),))
        if hasattr(row, 'up'):
            # NOTE(ltomasbo): Due to bug in core ovn not setting the up field
            # to DOWN in some cases (for example subports detachment from
            # trunks), we need to also check the chassis is set to claim the
            # port as ACTIVE
            return row.chassis and bool(row.up[0])
        elif row.chassis:
            return True
        return False
        super(LogicalSwitchPortCreateUpEvent, self).__init__(
            events, table, (('up', '=', True),))
        self.event_name = 'LogicalSwitchPortCreateUpEvent'

    def run(self, event, row, old):
        self.driver.set_port_status_up(row.logical_port)
        self.driver.set_port_status_up(row.name)


class PortBindingCreateDownEvent(row_event.RowEvent):
    """Row create event - Port_Binding 'up' = False
class LogicalSwitchPortCreateDownEvent(row_event.RowEvent):
    """Row create event - Logical_Switch_Port 'up' = False

    On connection, we get a dump of all ports, so if there is a neutron
    port that is up that has since been deactivated, we'll catch it here.
    This event will not be generated for new ports getting created.
    """

    def __init__(self, driver):
        self.driver = driver
        table = 'Port_Binding'
        table = 'Logical_Switch_Port'
        events = (self.ROW_CREATE,)
        super(PortBindingCreateDownEvent, self).__init__(events, table, None)
        self.event_name = 'PortBindingCreateDownEvent'

    def match_fn(self, event, row, old):
        if row.type in [ovn_const.PB_TYPE_VIRTUAL, ovn_const.PB_TYPE_PATCH,
                        ovn_const.OVN_CHASSIS_REDIRECT]:
            # NOTE(ltomasbo): Skipping as virtual ports are not being set to
            # ACTIVE
            # Patch ports are set to UP on creation, no need to update
            # No need to handle cr ports
            return False

        # TODO(ltomasbo): Remove the checkings for 'up' column once minimal
        # ovn version has it (v21.03.0). The match_fn can be then replaced
        # by different init method above:
        # super().__init__(
        #     events, table, (('up', '=', False), ('type', '=', ''),))
        if hasattr(row, 'up'):
            # NOTE(ltomasbo): Due to bug in core ovn not setting the up field
            # to DOWN in some cases (for example subports detachment from
            # trunks), we need to also check if the chassis is unset to set
            # the port as DOWN
            return not row.chassis or not bool(row.up[0])
        elif not row.chassis:
            return True
        return False
        super(LogicalSwitchPortCreateDownEvent, self).__init__(
            events, table, (('up', '=', False),))
        self.event_name = 'LogicalSwitchPortCreateDownEvent'

    def run(self, event, row, old):
        self.driver.set_port_status_down(row.logical_port)
        self.driver.set_port_status_down(row.name)


class PortBindingUpdateUpEvent(row_event.RowEvent):
    """Row update event - Port_Binding 'up' going from False to True
class LogicalSwitchPortUpdateUpEvent(row_event.RowEvent):
    """Row update event - Logical_Switch_Port 'up' going from False to True

    This happens when the VM goes up.
    New value of Port_Binding 'up' will be True and the old value will
    be False. Or if that column does not exists, the chassis will be set
    and the old chassis value will be empty.
    New value of Logical_Switch_Port 'up' will be True and the old value will
    be False.
    """

    def __init__(self, driver):
        self.driver = driver
        table = 'Port_Binding'
        table = 'Logical_Switch_Port'
        events = (self.ROW_UPDATE,)
        super(PortBindingUpdateUpEvent, self).__init__(events, table, None)
        self.event_name = 'PortBindingUpdateUpEvent'

    def match_fn(self, event, row, old):
        if row.type in (ovn_const.PB_TYPE_VIRTUAL,
                        ovn_const.OVN_CHASSIS_REDIRECT):
            # NOTE(ltomasbo): Skipping virtual ports as they are not being
            # set to ACTIVE
            # NOTE(ltomasbo): No need to handle cr ports
            return False
        if row.type == ovn_const.PB_TYPE_PATCH:
            # NOTE(ltomasbo): Only handle the logical_switch_port side,
            # not the router side.
            if (row.logical_port.startswith('lrp-') or
                    row.logical_port.startswith('cr-lrp')):
                return False
            try:
                if old.mac:
                    # NOTE(ltomasbo): only execute it once (the first update
                    # event for this port), as you don't need to set it to
                    # active several time
                    return True
            except AttributeError:
                return False
            return False
        # TODO(ltomasbo): Remove the checkings for 'up' column once minimal
        # ovn version has it (v21.03.0). The match_fn can be then replaced
        # by different init method above:
        # super().__init__(
        #     events, table, (('up', '=', True), ('type', '=', '')),
        #     old_conditions=(('up', '=', False),))
        try:
            if hasattr(row, 'up'):
                # NOTE(ltomasbo): Due to bug in core ovn not setting the up
                # field to DOWN in some cases (for example subports detachment
                # from trunks), we need to also check the chassis is set to
                # claim the port as ACTIVE
                return (bool(row.up[0]) and not bool(old.up[0]) and
                        row.chassis)
            elif row.chassis and not old.chassis:
                return True
        except AttributeError:
            # NOTE(ltomasbo): do not process if there is no old up/chassis
            # information
            return False
        return False
        super(LogicalSwitchPortUpdateUpEvent, self).__init__(
            events, table, (('up', '=', True),),
            old_conditions=(('up', '=', False),))
        self.event_name = 'LogicalSwitchPortUpdateUpEvent'

    def run(self, event, row, old):
        self.driver.set_port_status_up(row.logical_port)
        self.driver.set_port_status_up(row.name)


class PortBindingUpdateDownEvent(row_event.RowEvent):
    """Row update event - Port_Binding 'up' going from True to False
class LogicalSwitchPortUpdateDownEvent(row_event.RowEvent):
    """Row update event - Logical_Switch_Port 'up' going from True to False

    This happens when the VM goes down.
    New value of Port_Binding 'up' will be False and the old value will
    be True. Or if that column does not exists, the chassis will be unset
    and the old chassis will be set.
    New value of Logical_Switch_Port 'up' will be False and the old value will
    be True.
    """

    def __init__(self, driver):
        self.driver = driver
        table = 'Port_Binding'
        table = 'Logical_Switch_Port'
        events = (self.ROW_UPDATE,)
        super(PortBindingUpdateDownEvent, self).__init__(events, table, None)
        self.event_name = 'PortBindingUpdateDownEvent'

    def match_fn(self, event, row, old):
        if row.type in [ovn_const.PB_TYPE_VIRTUAL, ovn_const.PB_TYPE_PATCH,
                        ovn_const.OVN_CHASSIS_REDIRECT]:
            # NOTE(ltomasbo): Skipping as virtual ports are not being set to
            # ACTIVE
            # Patch ports are meant to be always UP, after creation, no need
            # to update
            # No need to handle cr ports
            return False
        # TODO(ltomasbo): Remove the checkings for 'up' column once minimal
        # ovn version has it (v21.03.0). The match_fn can be then replaced
        # by different init method above:
        # super().__init__(
        #     events, table, (('up', '=', False), ('type', '=', '')),
        #     old_conditions=(('up', '=', True),))
        try:
            if hasattr(row, 'up'):
                # NOTE(ltomasbo): Due to bug in core ovn not setting the up
                # field to DOWN in some cases (for example subports detachment
                # from trunks), we need to also check if the chassis is being
                # unset to set the port as DOWN
                return ((not bool(row.up[0]) and bool(old.up[0])) or
                        (not row.chassis and old.chassis))
            elif not row.chassis and old.chassis:
                return True
        except AttributeError:
            # NOTE(ltomasbo): do not process if there is no old up/chassis
            # information
            return False
        return False
        super(LogicalSwitchPortUpdateDownEvent, self).__init__(
            events, table, (('up', '=', False),),
            old_conditions=(('up', '=', True),))
        self.event_name = 'LogicalSwitchPortUpdateDownEvent'

    def run(self, event, row, old):
        self.driver.set_port_status_down(row.logical_port)
        self.driver.set_port_status_down(row.name)


class FIPAddDeleteEvent(row_event.RowEvent):
@@ -591,9 +505,17 @@ class OvnNbIdl(OvnIdlDistributedLock):

    def __init__(self, driver, remote, schema):
        super(OvnNbIdl, self).__init__(driver, remote, schema)
        self._lsp_update_up_event = LogicalSwitchPortUpdateUpEvent(driver)
        self._lsp_update_down_event = LogicalSwitchPortUpdateDownEvent(driver)
        self._lsp_create_up_event = LogicalSwitchPortCreateUpEvent(driver)
        self._lsp_create_down_event = LogicalSwitchPortCreateDownEvent(driver)
        self._fip_create_delete_event = FIPAddDeleteEvent(driver)

        self.notify_handler.watch_events([self._fip_create_delete_event])
        self.notify_handler.watch_events([self._lsp_create_up_event,
                                          self._lsp_create_down_event,
                                          self._lsp_update_up_event,
                                          self._lsp_update_down_event,
                                          self._fip_create_delete_event])

    @classmethod
    def from_server(cls, connection_string, schema_name, driver):
@@ -603,21 +525,29 @@ class OvnNbIdl(OvnIdlDistributedLock):
        helper.register_all()
        return cls(driver, connection_string, helper)

    def unwatch_logical_switch_port_create_events(self):
        """Unwatch the logical switch port create events.

        When the ovs idl client connects to the ovsdb-server, it gets
        a dump of all logical switch ports as events and we need to process
        them at start up.
        After the startup, there is no need to watch these events.
        So unwatch these events.
        """
        self.notify_handler.unwatch_events([self._lsp_create_up_event,
                                            self._lsp_create_down_event])
        self._lsp_create_up_event = None
        self._lsp_create_down_event = None

    def post_connect(self):
        self.unwatch_logical_switch_port_create_events()


class OvnSbIdl(OvnIdlDistributedLock):

    def __init__(self, driver, remote, schema, **kwargs):
        super(OvnSbIdl, self).__init__(driver, remote, schema, **kwargs)

        self._pb_create_up_event = PortBindingCreateUpEvent(driver)
        self._pb_create_down_event = PortBindingCreateDownEvent(driver)

        self.notify_handler.watch_events([
            self._pb_create_up_event,
            self._pb_create_down_event,
            PortBindingUpdateUpEvent(driver),
            PortBindingUpdateDownEvent(driver)])

    @classmethod
    def from_server(cls, connection_string, schema_name, driver):
        _check_and_set_ssl_files(schema_name)
@@ -648,23 +578,8 @@ class OvnSbIdl(OvnIdlDistributedLock):
        self._chassis_event = ChassisEvent(self.driver)
        self._portbinding_event = PortBindingChassisEvent(self.driver)
        self.notify_handler.watch_events(
            [self._chassis_event, self._portbinding_event])

        self.unwatch_port_binding_create_events()

    def unwatch_port_binding_create_events(self):
        """Unwatch the port binding create events.

        When the ovs idl client connects to the ovsdb-server, it gets
        a dump of all port binding events and we need to process
        them at start up.
        After the startup, there is no need to watch these events.
        So unwatch these events.
        """
        self.notify_handler.unwatch_events([self._pb_create_up_event,
                                            self._pb_create_down_event])
        self._pb_create_up_event = None
        self._pb_create_down_event = None
            [self._chassis_event, self._portbinding_event,
             PortBindingChassisUpdateEvent(self.driver)])


class OvnInitPGNbIdl(OvnIdl):
@@ -194,18 +194,12 @@ class TestNBDbMonitor(base.TestOVNFunctionalBase):
            lambda: not self._check_mac_binding_exists(macb_id),
            timeout=15, sleep=1)

    def _get_port_uuid(self, port_id):
        sb_port = self.sb_api.db_find(
            'Port_Binding', ('logical_port', '=', port_id)).execute()[0]
        return sb_port['_uuid']

    def _test_port_binding_and_status(self, port_id, action, status):
        # This function binds or unbinds port to chassis and
        # checks if port status matches with input status
        core_plugin = directory.get_plugin()
        self.sb_api.check_for_row_by_value_and_retry(
            'Port_Binding', 'logical_port', port_id)
        port_uuid = self._get_port_uuid(port_id)

        def check_port_status(status):
            port = core_plugin.get_ports(
@@ -214,21 +208,8 @@ class TestNBDbMonitor(base.TestOVNFunctionalBase):
        if action == 'bind':
            self.sb_api.lsp_bind(port_id, self.chassis,
                                 may_exist=True).execute(check_error=True)
            try:
                self.sb_api.db_set('Port_Binding', port_uuid,
                                   ('up', True)).execute(check_error=True)
            except KeyError:
                self.sb_api.db_set('Port_Binding', port_uuid,
                                   ('chassis', 'host-1')).execute(
                    check_error=True)
        else:
            self.sb_api.lsp_unbind(port_id).execute(check_error=True)
            try:
                self.sb_api.db_set('Port_Binding', port_uuid,
                                   ('up', False)).execute(check_error=True)
            except KeyError:
                self.sb_api.db_set('Port_Binding', port_uuid,
                                   ('chassis', None)).execute(check_error=True)
        n_utils.wait_until_true(lambda: check_port_status(status))

    def test_port_up_down_events(self):

@@ -85,15 +85,7 @@ OVN_SB_SCHEMA = {
                             "min": 0, "max": "unlimited"}}},
            "isRoot": True,
            "indexes": [["name"]]
        },
        "Port_Binding": {
            "columns": {
                "logical_port": {"type": "string"},
                "type": {"type": "string"},
                "chassis": {"type": "string"},
                "up": {"type": {"key": "boolean", "min": 0, "max": 1}}},
            "indexes": [["logical_port"]],
            "isRoot": True},
    }
}
}
@@ -282,6 +274,35 @@ class TestOvnIdlDistributedLock(base.BaseTestCase):
        self.assertFalse(self.idl.notify_handler.notify.called)


class TestPortBindingChassisUpdateEvent(base.BaseTestCase):
    def setUp(self):
        super(TestPortBindingChassisUpdateEvent, self).setUp()
        self.driver = mock.Mock()
        self.event = ovsdb_monitor.PortBindingChassisUpdateEvent(self.driver)

    def _test_event(self, event, row, old):
        if self.event.matches(event, row, old):
            self.event.run(event, row, old)
            self.driver.set_port_status_up.assert_called()
        else:
            self.driver.set_port_status_up.assert_not_called()

    def test_event_matches(self):
        # NOTE(twilson) This primarily tests implementation details. If a
        # scenario test is written that handles shutting down a compute
        # node uncleanly and performing a 'host-evacuate', this can be removed
        pbtable = fakes.FakeOvsdbTable.create_one_ovsdb_table(
            attrs={'name': 'Port_Binding'})
        ovsdb_row = fakes.FakeOvsdbRow.create_one_ovsdb_row
        self.driver.nb_ovn.lookup.return_value = ovsdb_row(attrs={'up': True})
        self._test_event(
            self.event.ROW_UPDATE,
            ovsdb_row(attrs={'_table': pbtable, 'chassis': 'one',
                             'type': '_fake_', 'logical_port': 'foo'}),
            ovsdb_row(attrs={'_table': pbtable, 'chassis': 'two',
                             'type': '_fake_'}))


class TestOvnNbIdlNotifyHandler(test_mech_driver.OVNMechanismDriverTestCase):

    def setUp(self):
@@ -293,6 +314,105 @@ class TestOvnNbIdlNotifyHandler(test_mech_driver.OVNMechanismDriverTestCase):
        self.driver.set_port_status_up = mock.Mock()
        self.driver.set_port_status_down = mock.Mock()

    def _test_lsp_helper(self, event, new_row_json, old_row_json=None,
                         table=None):
        row_uuid = uuidutils.generate_uuid()
        if not table:
            table = self.lp_table
        lp_row = ovs_idl.Row.from_json(self.idl, table,
                                       row_uuid, new_row_json)
        if old_row_json:
            old_row = ovs_idl.Row.from_json(self.idl, table,
                                            row_uuid, old_row_json)
        else:
            old_row = None
        self.idl.notify(event, lp_row, updates=old_row)
        # Add a STOP EVENT to the queue
        self.idl.notify_handler.shutdown()
        # Execute the notifications queued
        self.idl.notify_handler.notify_loop()

    def test_lsp_up_create_event(self):
        row_data = {"up": True, "name": "foo-name"}
        self._test_lsp_helper('create', row_data)
        self.driver.set_port_status_up.assert_called_once_with("foo-name")
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_lsp_down_create_event(self):
        row_data = {"up": False, "name": "foo-name"}
        self._test_lsp_helper('create', row_data)
        self.driver.set_port_status_down.assert_called_once_with("foo-name")
        self.assertFalse(self.driver.set_port_status_up.called)

    def test_lsp_up_not_set_event(self):
        row_data = {"up": ['set', []], "name": "foo-name"}
        self._test_lsp_helper('create', row_data)
        self.assertFalse(self.driver.set_port_status_up.called)
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_unwatch_logical_switch_port_create_events(self):
        self.idl.unwatch_logical_switch_port_create_events()
        row_data = {"up": True, "name": "foo-name"}
        self._test_lsp_helper('create', row_data)
        self.assertFalse(self.driver.set_port_status_up.called)
        self.assertFalse(self.driver.set_port_status_down.called)

        row_data["up"] = False
        self._test_lsp_helper('create', row_data)
        self.assertFalse(self.driver.set_port_status_up.called)
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_post_connect(self):
        self.idl.post_connect()
        self.assertIsNone(self.idl._lsp_create_up_event)
        self.assertIsNone(self.idl._lsp_create_down_event)

    def test_lsp_up_update_event(self):
        new_row_json = {"up": True, "name": "foo-name"}
        old_row_json = {"up": False}
        self._test_lsp_helper('update', new_row_json,
                              old_row_json=old_row_json)
        self.driver.set_port_status_up.assert_called_once_with("foo-name")
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_lsp_down_update_event(self):
        new_row_json = {"up": False, "name": "foo-name"}
        old_row_json = {"up": True}
        self._test_lsp_helper('update', new_row_json,
                              old_row_json=old_row_json)
        self.driver.set_port_status_down.assert_called_once_with("foo-name")
        self.assertFalse(self.driver.set_port_status_up.called)

    def test_lsp_up_update_event_no_old_data(self):
        new_row_json = {"up": True, "name": "foo-name"}
        self._test_lsp_helper('update', new_row_json,
                              old_row_json=None)
        self.assertFalse(self.driver.set_port_status_up.called)
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_lsp_down_update_event_no_old_data(self):
        new_row_json = {"up": False, "name": "foo-name"}
        self._test_lsp_helper('update', new_row_json,
                              old_row_json=None)
        self.assertFalse(self.driver.set_port_status_up.called)
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_lsp_other_column_update_event(self):
        new_row_json = {"up": False, "name": "foo-name",
                        "addresses": ["10.0.0.2"]}
        old_row_json = {"addresses": ["10.0.0.3"]}
        self._test_lsp_helper('update', new_row_json,
                              old_row_json=old_row_json)
        self.assertFalse(self.driver.set_port_status_up.called)
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_notify_other_table(self):
        new_row_json = {"name": "foo-name"}
        self._test_lsp_helper('create', new_row_json,
                              table=self.idl.tables.get("Logical_Switch"))
        self.assertFalse(self.driver.set_port_status_up.called)
        self.assertFalse(self.driver.set_port_status_down.called)

    @mock.patch.object(hash_ring_manager.HashRingManager, 'get_node')
    def test_notify_different_target_node(self, mock_get_node):
        mock_get_node.return_value = 'this-is-a-different-node'
@@ -312,23 +432,23 @@ class TestOvnSbIdlNotifyHandler(test_mech_driver.OVNMechanismDriverTestCase):
        super(TestOvnSbIdlNotifyHandler, self).setUp()
        sb_helper = ovs_idl.SchemaHelper(schema_json=OVN_SB_SCHEMA)
        sb_helper.register_table('Chassis')
        sb_helper.register_table('Port_Binding')
        self.sb_idl = ovsdb_monitor.OvnSbIdl(self.driver, "remote", sb_helper)
        self.sb_idl.post_connect()
        self.chassis_table = self.sb_idl.tables.get('Chassis')
        self.driver.update_segment_host_mapping = mock.Mock()
        self.driver.set_port_status_up = mock.Mock()
        self.driver.set_port_status_down = mock.Mock()
        self.l3_plugin = directory.get_plugin(n_const.L3)
        self.l3_plugin.schedule_unhosted_gateways = mock.Mock()

        self.chassis_row_json = {
        self.row_json = {
            "name": "fake-name",
            "hostname": "fake-hostname",
            "external_ids": ['map', [["ovn-bridge-mappings",
                                      "fake-phynet1:fake-br1"]]]
        }

    def _test_helper(self, event, table, new_row_json, old_row_json=None):
    def _test_chassis_helper(self, event, new_row_json, old_row_json=None):
        row_uuid = uuidutils.generate_uuid()
        table = self.chassis_table
        row = ovs_idl.Row.from_json(self.sb_idl, table, row_uuid, new_row_json)
        if old_row_json:
            old_row = ovs_idl.Row.from_json(self.sb_idl, table,
@@ -341,223 +461,76 @@ class TestOvnSbIdlNotifyHandler(test_mech_driver.OVNMechanismDriverTestCase):
        # Execute the notifications queued
        self.sb_idl.notify_handler.notify_loop()

    def _test_chassis_helper(self, event, new_row_json, old_row_json=None,
                             table=None):
        self.sb_idl.post_connect()
        self.chassis_table = self.sb_idl.tables.get('Chassis')
        if not table:
            table = self.chassis_table
        self._test_helper(event, table, new_row_json, old_row_json)

    def test_chassis_create_event(self):
        self._test_chassis_helper('create', self.chassis_row_json)
        self._test_chassis_helper('create', self.row_json)
        self.driver.update_segment_host_mapping.assert_called_once_with(
            'fake-hostname', ['fake-phynet1'])
        self.l3_plugin.schedule_unhosted_gateways.assert_called_once_with(
            event_from_chassis=None)

    def test_chassis_delete_event(self):
        self._test_chassis_helper('delete', self.chassis_row_json)
        self._test_chassis_helper('delete', self.row_json)
        self.driver.update_segment_host_mapping.assert_called_once_with(
            'fake-hostname', [])
        self.l3_plugin.schedule_unhosted_gateways.assert_called_once_with(
            event_from_chassis='fake-name')

    def test_chassis_update_event(self):
        old_row_json = copy.deepcopy(self.chassis_row_json)
        old_row_json = copy.deepcopy(self.row_json)
        old_row_json['external_ids'][1][0][1] = (
            "fake-phynet2:fake-br2")
        self._test_chassis_helper('update', self.chassis_row_json,
                                  old_row_json)
        self._test_chassis_helper('update', self.row_json, old_row_json)
        self.driver.update_segment_host_mapping.assert_called_once_with(
            'fake-hostname', ['fake-phynet1'])
        self.l3_plugin.schedule_unhosted_gateways.assert_called_once_with(
            event_from_chassis=None)

    def test_chassis_update_event_reschedule_not_needed(self):
        self.chassis_row_json['external_ids'][1].append(['foo_field',
                                                         'foo_value_new'])
        old_row_json = copy.deepcopy(self.chassis_row_json)
        self.row_json['external_ids'][1].append(['foo_field', 'foo_value_new'])
        old_row_json = copy.deepcopy(self.row_json)
        old_row_json['external_ids'][1][1][1] = (
            "foo_value")
        self._test_chassis_helper('update', self.chassis_row_json,
                                  old_row_json)
        self._test_chassis_helper('update', self.row_json, old_row_json)
        self.driver.update_segment_host_mapping.assert_not_called()
        self.l3_plugin.schedule_unhosted_gateways.assert_not_called()

    def test_chassis_update_event_reschedule_lost_physnet(self):
        old_row_json = copy.deepcopy(self.chassis_row_json)
        self.chassis_row_json['external_ids'][1][0][1] = ''
        self._test_chassis_helper('update', self.chassis_row_json,
                                  old_row_json)
        old_row_json = copy.deepcopy(self.row_json)
        self.row_json['external_ids'][1][0][1] = ''
        self._test_chassis_helper('update', self.row_json, old_row_json)
        self.l3_plugin.schedule_unhosted_gateways.assert_called_once_with(
            event_from_chassis='fake-name')

    def test_chassis_update_event_reschedule_add_physnet(self):
        old_row_json = copy.deepcopy(self.chassis_row_json)
        self.chassis_row_json['external_ids'][1][0][1] += ',foo_physnet:foo_br'
        self._test_chassis_helper('update', self.chassis_row_json,
                                  old_row_json)
        old_row_json = copy.deepcopy(self.row_json)
        self.row_json['external_ids'][1][0][1] += ',foo_physnet:foo_br'
        self._test_chassis_helper('update', self.row_json, old_row_json)
        self.driver.update_segment_host_mapping.assert_called_once_with(
            'fake-hostname', ['fake-phynet1', 'foo_physnet'])
        self.l3_plugin.schedule_unhosted_gateways.assert_called_once_with(
            event_from_chassis=None)

    def test_chassis_update_event_reschedule_add_and_remove_physnet(self):
        old_row_json = copy.deepcopy(self.chassis_row_json)
        self.chassis_row_json['external_ids'][1][0][1] = 'foo_physnet:foo_br'
        self._test_chassis_helper('update', self.chassis_row_json,
                                  old_row_json)
        old_row_json = copy.deepcopy(self.row_json)
        self.row_json['external_ids'][1][0][1] = 'foo_physnet:foo_br'
        self._test_chassis_helper('update', self.row_json, old_row_json)
        self.driver.update_segment_host_mapping.assert_called_once_with(
            'fake-hostname', ['foo_physnet'])
        self.l3_plugin.schedule_unhosted_gateways.assert_called_once_with(
            event_from_chassis=None)

    def test_chassis_update_empty_no_external_ids(self):
        old_row_json = copy.deepcopy(self.chassis_row_json)
        old_row_json = copy.deepcopy(self.row_json)
        old_row_json.pop('external_ids')
        with mock.patch(
                'neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.'
                'ovsdb_monitor.ChassisEvent.'
                'handle_ha_chassis_group_changes') as mock_ha:
            self._test_chassis_helper('update', self.chassis_row_json,
                                      old_row_json)
            self._test_chassis_helper('update', self.row_json, old_row_json)
            self.driver.update_segment_host_mapping.assert_not_called()
            self.l3_plugin.schedule_unhosted_gateways.assert_not_called()
            mock_ha.assert_not_called()

    def _test_port_binding_helper(self, event, new_row_json,
                                  old_row_json=None, table=None):
        self.port_binding_table = self.sb_idl.tables.get('Port_Binding')
        if not table:
            table = self.port_binding_table
        self._test_helper(event, table, new_row_json, old_row_json)

    def test_port_binding_up_create_event(self):
        row_data = {"type": "", "up": True, "logical_port": "foo-name",
                    "chassis": "foo-host"}
        self._test_port_binding_helper('create', row_data)
        self.driver.set_port_status_up.assert_called_once_with("foo-name")
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_port_binding_up_create_patch_type_event(self):
        # Note(ltomasbo): This will behave the same for up = True|False
        # as the port needs to transition to up anyway
        row_data = {"type": "patch", "logical_port": "foo-name",
                    "chassis": "foo-host"}
        self._test_port_binding_helper('create', row_data)
        self.driver.set_port_status_up.assert_called_once_with("foo-name")
        self.assertFalse(self.driver.set_port_status_down.called)

        row_data = {"type": "patch", "logical_port": "lrp-foo-name"}
        self._test_port_binding_helper('create', row_data)
        self.assertEqual(1, self.driver.set_port_status_up.call_count)
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_port_binding_up_create_virtual_type_event(self):
        row_data = {"type": "virtual", "up": True,
                    "logical_port": "foo-name", "chassis": "foo-host"}
        self._test_port_binding_helper('create', row_data)
        self.assertFalse(self.driver.set_port_status_up.called)
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_port_binding_down_create_event(self):
        row_data = {"type": "", "up": False, "logical_port": "foo-name",
                    "chassis": None}
        self._test_port_binding_helper('create', row_data)
        self.driver.set_port_status_down.assert_called_once_with("foo-name")
        self.assertFalse(self.driver.set_port_status_up.called)

    def test_port_binding_down_create_virtual_type_event(self):
        row_data = {"type": "virtual", "up": False,
                    "logical_port": "foo-name", "chassis": None}
        self._test_port_binding_helper('create', row_data)
        self.assertFalse(self.driver.set_port_status_up.called)
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_port_binding_up_update_event(self):
        new_row_json = {"type": "", "up": True, "logical_port": "foo-name",
                        "chassis": "foo-host"}
        old_row_json = {"type": "", "up": False, "logical_port": "foo-name",
                        "chassis": None}
        self._test_port_binding_helper('update', new_row_json,
                                       old_row_json=old_row_json)
        self.driver.set_port_status_up.assert_called_once_with("foo-name")
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_port_binding_down_update_event(self):
        new_row_json = {"type": "", "up": False, "logical_port": "foo-name",
                        "chassis": None}
        old_row_json = {"type": "", "up": True, "logical_port": "foo-name",
                        "chassis": "foo-host"}
        self._test_port_binding_helper('update', new_row_json,
                                       old_row_json=old_row_json)
        self.driver.set_port_status_down.assert_called_once_with("foo-name")
        self.assertFalse(self.driver.set_port_status_up.called)

    def test_port_binding_down_update_event_up_not_updated(self):
        new_row_json = {"type": "", "up": True, "logical_port": "foo-name",
                        "chassis": None}
        old_row_json = {"type": "", "up": True, "logical_port": "foo-name",
                        "chassis": "foo-host"}
        self._test_port_binding_helper('update', new_row_json,
                                       old_row_json=old_row_json)
        self.driver.set_port_status_down.assert_called_once_with("foo-name")
        self.assertFalse(self.driver.set_port_status_up.called)

    def test_port_binding_up_update_event_no_old_data(self):
        new_row_json = {"type": "", "up": True, "logical_port": "foo-name",
                        "chassis": "foo-host"}
        self._test_port_binding_helper('update', new_row_json,
                                       old_row_json=None)
        self.assertFalse(self.driver.set_port_status_up.called)
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_port_binding_down_update_event_no_old_data(self):
        new_row_json = {"type": "", "up": False, "logical_port": "foo-name",
                        "chassis": None}
        self._test_port_binding_helper('update', new_row_json,
                                       old_row_json=None)
        self.assertFalse(self.driver.set_port_status_up.called)
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_port_binding_other_column_update_event(self):
        new_row_json = {"type": "", "up": True, "name": "foo-name",
                        "addresses": ["10.0.0.2"]}
        old_row_json = {"type": "", "up": True, "name": "foo-name",
                        "addresses": ["10.0.0.3"]}
        self._test_port_binding_helper('update', new_row_json,
                                       old_row_json=old_row_json)
        self.assertFalse(self.driver.set_port_status_up.called)
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_notify_other_table(self):
        new_row_json = {"type": "", "up": True, "logical_port": "foo-name"}
        self._test_port_binding_helper(
            'create', new_row_json,
            table=self.sb_idl.tables.get("Chassis"))
        self.assertFalse(self.driver.set_port_status_up.called)
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_unwatch_port_binding_create_events(self):
        self.sb_idl.unwatch_port_binding_create_events()
        row_data = {"type": "", "up": True, "logical_port": "foo-name"}
        self._test_port_binding_helper('create', row_data)
        self.assertFalse(self.driver.set_port_status_up.called)
        self.assertFalse(self.driver.set_port_status_down.called)

        row_data["up"] = False
        self._test_port_binding_helper('create', row_data)
        self.assertFalse(self.driver.set_port_status_up.called)
        self.assertFalse(self.driver.set_port_status_down.called)

    def test_post_connect(self):
        self.assertIsNotNone(self.sb_idl._pb_create_up_event)
        self.assertIsNotNone(self.sb_idl._pb_create_down_event)
        self.sb_idl.post_connect()
        self.assertIsNone(self.sb_idl._pb_create_up_event)
        self.assertIsNone(self.sb_idl._pb_create_down_event)

class TestChassisEvent(base.BaseTestCase):
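
The TODO(ltomasbo) notes in the removed Port_Binding event classes point out that their hand-written match_fn checks could eventually be replaced by passing conditions straight to the RowEvent constructor, which is the form the restored Logical_Switch_Port classes already use. Below is a minimal illustrative sketch of that condition-based form, assuming the same ovsdbapp row_event API used in the diff above; the driver object and its set_port_status_up() method are stand-ins for illustration and are not part of this commit.

# Illustrative sketch only, not code from this commit: condition-based
# RowEvent registration as referenced by the TODO comments above.
# `driver` is assumed to expose a set_port_status_up() method.
from ovsdbapp.backend.ovs_idl import event as row_event


class ExampleLSPUpdateUpEvent(row_event.RowEvent):
    """Fire when a Logical_Switch_Port 'up' column flips from False to True."""

    def __init__(self, driver):
        self.driver = driver
        # With conditions/old_conditions the base class only delivers row
        # updates where 'up' is now True and was False before, so no
        # match_fn override is required.
        super(ExampleLSPUpdateUpEvent, self).__init__(
            (self.ROW_UPDATE,), 'Logical_Switch_Port',
            (('up', '=', True),),
            old_conditions=(('up', '=', False),))
        self.event_name = 'ExampleLSPUpdateUpEvent'

    def run(self, event, row, old):
        # The Logical_Switch_Port name is used as the identifier handed to
        # the driver, mirroring the restored LogicalSwitchPortUpdateUpEvent.
        self.driver.set_port_status_up(row.name)

Such an event instance would then be registered on the IDL with notify_handler.watch_events([...]), as OvnNbIdl.__init__ above does for the restored LogicalSwitchPort* events.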