Avoid use of ovn metadata port IP for HM checks

For every backend IP in the load balancer for which a health
check is configured, a new row is created in the
Service_Monitor table, and based on that row ovn-controller
periodically sends out the service monitor packets.
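
As a rough sketch (the column names follow the OVN SB
Service_Monitor schema; the addresses and port name are
hypothetical), a backend 10.0.0.3:80 probed from 10.0.0.4
translates into a row like:

    # Hypothetical Service_Monitor row (OVN SB DB); ovn-controller
    # uses src_ip as the source address of the periodic probes.
    service_monitor_row = {
        'ip': '10.0.0.3',                # backend IP under check
        'port': 80,                      # backend port under check
        'protocol': 'tcp',
        'logical_port': 'lsp-member-a',  # member's logical switch port
        'src_ip': '10.0.0.4',            # probe source IP
    }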

This patch creates a new port for this purpose instead of
using the ovn_metadata_port to configure the backends in the
ip_port_mappings field; this mapping is the information that
gets translated into the Service_Monitor entries (more
details in [1]).

[1] 24cd3267c4/northd/ovn-northd.8.xml (L1431)
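
[1] documents ip_port_mappings as mapping each endpoint IP to
LOGICAL_PORT:SRC_IP. A minimal sketch of the idea, with hypothetical
names and addresses, using the same ovsdbapp call the helper relies on:

    # Map the backend IP to its logical switch port and the IP of the
    # dedicated ovn-lb-hm-<subnet_id> port, so health checks are
    # sourced from that port instead of the ovn_metadata_port.
    mappings = {'10.0.0.3': 'lsp-member-a:10.0.0.4'}
    ovn_nbdb_api.db_set(
        'Load_Balancer', ovn_lb_uuid,
        ('ip_port_mappings', mappings)).execute(check_error=True)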

Closes-Bug: #2004238
Change-Id: I11c4d9671eee002b15080d055a18a4d3f4d7c540
(cherry picked from commit 54d96ca072)
Fernando Royo 2023-02-10 19:18:09 +01:00
parent c7b0c7ee9d
commit fac557f9f7
3 changed files with 302 additions and 42 deletions

ovn_octavia_provider/common/constants.py

@@ -16,6 +16,7 @@ from octavia_lib.common import constants
# TODO(mjozefcz): Use those variables from neutron-lib once released.
LRP_PREFIX = "lrp-"
OVN_NAME_PREFIX = "neutron-"
LB_HM_PORT_PREFIX = "ovn-lb-hm-"
LB_VIP_PORT_PREFIX = "ovn-lb-vip-"
OVN_PORT_NAME_EXT_ID_KEY = 'neutron:port_name'
OVN_ROUTER_NAME_EXT_ID_KEY = 'neutron:router_name'
@@ -24,6 +25,7 @@ OVN_PORT_FIP_EXT_ID_KEY = 'neutron:port_fip'
OVN_SUBNET_EXT_ID_KEY = 'neutron:subnet_id'
OVN_SUBNET_EXT_IDS_KEY = 'neutron:subnet_ids'
OVN_NETWORK_NAME_EXT_ID_KEY = 'neutron:network_name'
OVN_PROJECT_EXT_ID_KEY = 'neutron:project_id'
OVN_SG_IDS_EXT_ID_KEY = 'neutron:security_group_ids'
OVN_DEVICE_OWNER_EXT_ID_KEY = 'neutron:device_owner'
OVN_FIP_EXT_ID_KEY = 'neutron:fip_id'

ovn_octavia_provider/helper.py

@@ -134,16 +134,63 @@ class OvnProviderHelper():
raise idlutils.RowNotFound(table=row._table.name,
col=col, match=key) from e
def _ensure_hm_ovn_port(self, network_id):
# We need to have a metadata or dhcp port, OVN should have created
# one when the network was created
def _create_hm_port(self, network_id, subnet_id, project_id):
port = {'port': {'name': ovn_const.LB_HM_PORT_PREFIX + str(subnet_id),
'network_id': network_id,
'fixed_ips': [{'subnet_id': subnet_id}],
'admin_state_up': True,
'port_security_enabled': False,
'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED,
'project_id': project_id}}
neutron_client = clients.get_neutron_client()
try:
hm_port = neutron_client.create_port(port)
return hm_port['port'] if hm_port['port'] else None
except n_exc.NeutronClientException:
# NOTE (froyo): for any other exception, e.g. Timeout,
# we should try to ensure no leftover port remains
self._clean_up_hm_port(subnet_id)
return None
def _clean_up_hm_port(self, subnet_id):
# Delete the hm port created for subnet_id if there isn't any
# other health monitor using it
neutron_client = clients.get_neutron_client()
hm_port_ip = None
hm_checks_port = self._neutron_list_ports(neutron_client, **{
'name': f'{ovn_const.LB_HM_PORT_PREFIX}{subnet_id}'})
if hm_checks_port['ports']:
# NOTE(froyo): Just to cover the case where we have more than one
# hm-port created by a race condition on create_hm_port; we need
# to ensure no leftover ports remain
for hm_port in hm_checks_port['ports']:
for fixed_ip in hm_port.get('fixed_ips', []):
if fixed_ip['subnet_id'] == subnet_id:
hm_port_ip = fixed_ip['ip_address']
if hm_port_ip:
lbs = self.ovn_nbdb_api.db_find_rows(
'Load_Balancer', ('health_check', '!=', [])).execute()
for lb in lbs:
for k, v in lb.ip_port_mappings.items():
if hm_port_ip in v:
return
# Not found any other health monitor using the hm port
self.delete_port(hm_port['id'])
def _ensure_hm_ovn_port(self, network_id, subnet_id, project_id):
# We will use a dedicated port for this, so we should find the one
# related to the network id; if not found, create a new one and use it.
neutron_client = clients.get_neutron_client()
meta_dhcp_port = neutron_client.list_ports(
network_id=network_id,
device_owner=n_const.DEVICE_OWNER_DISTRIBUTED)
if meta_dhcp_port['ports']:
return meta_dhcp_port['ports'][0]
hm_checks_port = self._neutron_list_ports(neutron_client, **{
'network_id': network_id,
'name': f'{ovn_const.LB_HM_PORT_PREFIX}{subnet_id}'})
if hm_checks_port['ports']:
return hm_checks_port['ports'][0]
else:
return self._create_hm_port(network_id, subnet_id, project_id)
def _get_nw_router_info_on_interface_event(self, lrp):
"""Get the Router and Network information on an interface event
@@ -388,6 +435,14 @@ class OvnProviderHelper():
def _find_ovn_lbs_with_retry(self, lb_id, protocol=None):
return self._find_ovn_lbs(lb_id, protocol=protocol)
@tenacity.retry(
retry=tenacity.retry_if_exception_type(n_exc.NeutronClientException),
wait=tenacity.wait_exponential(),
stop=tenacity.stop_after_delay(10),
reraise=True)
def _neutron_list_ports(self, neutron_client, **params):
return neutron_client.list_ports(**params)
def _find_ovn_lbs(self, lb_id, protocol=None):
"""Find the Loadbalancers in OVN with the given lb_id as its name
@@ -926,8 +981,8 @@ class OvnProviderHelper():
break
elif (loadbalancer.get(constants.VIP_NETWORK_ID) and
loadbalancer.get(constants.VIP_ADDRESS)):
ports = neutron_client.list_ports(
network_id=loadbalancer[constants.VIP_NETWORK_ID])
ports = self._neutron_list_ports(neutron_client, **{
'network_id': loadbalancer[constants.VIP_NETWORK_ID]})
for p in ports['ports']:
for ip in p['fixed_ips']:
if ip['ip_address'] == loadbalancer[
@@ -942,7 +997,7 @@ class OvnProviderHelper():
# Any Exception set the status to ERROR
if isinstance(port, dict):
try:
self.delete_vip_port(port.get('id'))
self.delete_port(port.get('id'))
LOG.warning("Deleting the VIP port %s since LB went into "
"ERROR state", str(port.get('id')))
except Exception:
@@ -1059,7 +1114,7 @@ class OvnProviderHelper():
# Any Exception set the status to ERROR
if isinstance(port, dict):
try:
self.delete_vip_port(port.get('id'))
self.delete_port(port.get('id'))
LOG.warning("Deleting the VIP port %s since LB went into "
"ERROR state", str(port.get('id')))
except Exception:
@@ -1103,7 +1158,7 @@ class OvnProviderHelper():
if vip_port_id:
LOG.warning("Deleting the VIP port %s associated to LB "
"missing in OVN DBs", str(vip_port_id))
self.delete_vip_port(vip_port_id)
self.delete_port(vip_port_id)
except Exception:
LOG.exception("Error deleting the VIP port %s",
str(vip_port_id))
@@ -1123,7 +1178,7 @@ class OvnProviderHelper():
# https://cito.github.io/blog/never-iterate-a-changing-dict/
status = {key: value for key, value in status.items() if value}
# Delete VIP port from neutron.
self.delete_vip_port(port_id)
self.delete_port(port_id)
except Exception:
LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of loadbalancer")
lbalancer_status[constants.PROVISIONING_STATUS] = constants.ERROR
@@ -1133,6 +1188,8 @@ class OvnProviderHelper():
def _lb_delete(self, loadbalancer, ovn_lb, status):
commands = []
member_subnets = []
clean_up_hm_port_required = False
if loadbalancer['cascade']:
# Delete all pools
for key, value in ovn_lb.external_ids.items():
@@ -1141,6 +1198,7 @@ class OvnProviderHelper():
# Delete all members in the pool
if value and len(value.split(',')) > 0:
for mem_info in value.split(','):
member_subnets.append(mem_info.split('_')[3])
status[constants.MEMBERS].append({
constants.ID: mem_info.split('_')[1],
constants.PROVISIONING_STATUS:
@@ -1154,6 +1212,13 @@ class OvnProviderHelper():
constants.ID: key.split('_')[1],
constants.PROVISIONING_STATUS: constants.DELETED,
constants.OPERATING_STATUS: constants.OFFLINE})
if ovn_lb.health_check:
clean_up_hm_port_required = True
commands.append(
self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb.uuid,
'health_check'))
ls_refs = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY, {})
if ls_refs:
try:
@@ -1213,6 +1278,12 @@ class OvnProviderHelper():
else:
raise
# NOTE(froyo): we should remove the hm-port if the LB was using a HM
# and no more LBs are using it
if clean_up_hm_port_required:
for subnet_id in list(set(member_subnets)):
self._clean_up_hm_port(subnet_id)
return status
def lb_update(self, loadbalancer):
@@ -1895,8 +1966,14 @@ class OvnProviderHelper():
pool = {constants.ID: member[constants.POOL_ID],
constants.PROVISIONING_STATUS: constants.ACTIVE,
constants.OPERATING_STATUS: pool_status}
if pool_status == constants.ONLINE and ovn_lb.health_check:
if ovn_lb.health_check:
self._update_hm_members(ovn_lb, pool_key)
# NOTE(froyo): if the pool status is OFFLINE there are no more
# members, so we should ensure the hm-port is deleted if no
# more LBs are using it. This call needs to happen after the
# cleanup of the ip_port_mappings for the OVN LB.
if pool_status == constants.OFFLINE:
self._clean_up_hm_port(member['subnet_id'])
status = {
constants.POOLS: [pool],
constants.MEMBERS: [
@@ -2051,9 +2128,9 @@ class OvnProviderHelper():
except n_exc.IpAddressAlreadyAllocatedClient as e:
# Sometimes the VIP is already created (race conditions);
# let's get it from the Neutron API.
ports = neutron_client.list_ports(
network_id=vip_d[constants.VIP_NETWORK_ID],
name=f'{ovn_const.LB_VIP_PORT_PREFIX}{lb_id}')
ports = self._neutron_list_ports(neutron_client, **{
'network_id': vip_d[constants.VIP_NETWORK_ID],
'name': f'{ovn_const.LB_VIP_PORT_PREFIX}{lb_id}'})
if not ports['ports']:
LOG.error('Cannot create/get LoadBalancer VIP port with '
'fixed IP: %s', vip_d[constants.VIP_ADDRESS])
@@ -2065,14 +2142,14 @@ class OvnProviderHelper():
except n_exc.NeutronClientException as e:
# NOTE (froyo): for any other exception, e.g. Timeout,
# we should try to ensure no leftover port remains
ports = neutron_client.list_ports(
network_id=vip_d[constants.VIP_NETWORK_ID],
name=f'{ovn_const.LB_VIP_PORT_PREFIX}{lb_id}')
ports = self._neutron_list_ports(neutron_client, **{
'network_id': vip_d[constants.VIP_NETWORK_ID],
'name': f'{ovn_const.LB_VIP_PORT_PREFIX}{lb_id}'})
if ports['ports']:
port = ports['ports'][0]
LOG.debug('Leftover port %s has been found. Trying to '
'delete it', port['id'])
self.delete_vip_port(port['id'])
self.delete_port(port['id'])
raise e
@tenacity.retry(
@@ -2081,7 +2158,7 @@ class OvnProviderHelper():
wait=tenacity.wait_exponential(max=75),
stop=tenacity.stop_after_attempt(15),
reraise=True)
def delete_vip_port(self, port_id):
def delete_port(self, port_id):
neutron_client = clients.get_neutron_client()
try:
neutron_client.delete_port(port_id)
@@ -2328,7 +2405,10 @@ class OvnProviderHelper():
network_id = member_lsp.external_ids.get(
ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY).split('neutron-')[1]
hm_port = self._ensure_hm_ovn_port(network_id)
project_id = member_lsp.external_ids.get(
ovn_const.OVN_PROJECT_EXT_ID_KEY)
hm_port = self._ensure_hm_ovn_port(
network_id, member_subnet, project_id)
if not hm_port:
LOG.error("No port on network %(network)s available for "
"health monitoring. Cannot create a Health Monitor "
@@ -2372,10 +2452,20 @@ class OvnProviderHelper():
mappings[member_ip] = member_src
commands = []
# NOTE(froyo): This db_clear over the ip_port_mappings field is needed
# to clean out the old values (including the removed member); the
# following db_set will update it using the mappings calculated a few
# lines above, with the remaining members only.
# TODO(froyo): use the ovsdbapp commands to add/del members to
# ip_port_mappings field
commands.append(
self.ovn_nbdb_api.db_set(
'Load_Balancer', ovn_lb.uuid,
('ip_port_mappings', mappings)))
self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb.uuid,
'ip_port_mappings'))
if mappings:
commands.append(
self.ovn_nbdb_api.db_set(
'Load_Balancer', ovn_lb.uuid,
('ip_port_mappings', mappings)))
self._execute_commands(commands)
return True
@@ -2549,11 +2639,18 @@ class OvnProviderHelper():
return status
# Need to send pool info in status update to avoid immutable objects,
# the LB should have this info
# the LB should have this info. Also, in order to delete the hm port
# used for health checks, we need to get all the subnets from the
# members of the pool
pool_id = None
pool_listeners = []
member_subnets = []
for k, v in ovn_lb.external_ids.items():
if ovn_const.LB_EXT_IDS_POOL_PREFIX in k:
members = self._extract_member_info(ovn_lb.external_ids[k])
member_subnets = list(
set([mem_subnet for (_, _, mem_subnet) in members])
)
pool_id = k.split('_')[1]
pool_listeners = self._get_pool_listeners(
ovn_lb, self._get_pool_key(pool_id))
@@ -2583,6 +2680,11 @@ class OvnProviderHelper():
self.ovn_nbdb_api.db_destroy('Load_Balancer_Health_Check',
lbhc.uuid))
self._execute_commands(commands)
# Delete the hm port if not in use by other health monitors
for subnet in member_subnets:
self._clean_up_hm_port(subnet)
status = {
constants.LOADBALANCERS: [
{constants.ID: ovn_lb.name,

ovn_octavia_provider/tests/unit/test_helper.py

@@ -706,7 +706,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
constants.ERROR)
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test_lb_create_exception(self, del_port, net_cli):
self.helper._find_ovn_lbs.side_effect = [RuntimeError]
net_cli.return_value.list_ports.return_value = self.ports
@@ -724,7 +724,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
constants.ERROR)
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test_lb_delete(self, del_port, net_cli):
net_cli.return_value.delete_port.return_value = None
status = self.helper.lb_delete(self.ovn_lb)
@@ -738,7 +738,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
@mock.patch.object(ovn_helper.OvnProviderHelper,
'_get_vip_port_from_loadbalancer_id')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test_lb_delete_row_not_found(self, del_port, get_vip_port):
self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound]
get_vip_port.return_value = None
@@ -753,7 +753,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
@mock.patch.object(ovn_helper.OvnProviderHelper,
'_get_vip_port_from_loadbalancer_id')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test_lb_delete_row_not_found_port_leftover(
self, del_port, get_vip_port):
self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound]
@@ -770,7 +770,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
@mock.patch.object(ovn_helper.OvnProviderHelper,
'_get_vip_port_from_loadbalancer_id')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test_lb_delete_row_not_found_vip_leak(self, del_port, get_vip_port):
self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound]
get_vip_port.return_value = 'foo_port'
@@ -783,7 +783,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
del_port.assert_called_once_with('foo_port')
get_vip_port.assert_called_once_with(self.lb['id'])
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test_lb_delete_exception(self, del_port):
self.helper.ovn_nbdb_api.lb_del.side_effect = [RuntimeError]
status = self.helper.lb_delete(self.lb)
@@ -795,7 +795,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.ovn_lb.uuid)
del_port.assert_not_called()
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test_lb_delete_step_by_step(self, del_port):
self.helper.ovn_nbdb_api.lr_lb_del.side_effect = [idlutils.RowNotFound]
status = self.helper.lb_delete(self.lb)
@@ -807,7 +807,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.ovn_lb.uuid)
del_port.assert_called_once_with('foo_port')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test_lb_delete_step_by_step_exception(self, del_port):
self.helper.ovn_nbdb_api.lb_del.side_effect = [idlutils.RowNotFound]
status = self.helper.lb_delete(self.lb)
@@ -820,7 +820,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
del_port.assert_not_called()
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test_lb_delete_port_not_found(self, del_port, net_cli):
net_cli.return_value.delete_port.side_effect = (
[n_exc.PortNotFoundClient])
@@ -833,7 +833,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.ovn_lb.uuid)
del_port.assert_called_once_with('foo_port')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test_lb_delete_port_exception(self, del_port):
del_port.side_effect = [Exception]
status = self.helper.lb_delete(self.ovn_lb)
@@ -2905,10 +2905,10 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.mock_add_request.assert_not_called()
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
def test_delete_vip_port_not_found(self, net_cli):
def test_delete_port_not_found(self, net_cli):
net_cli.return_value.delete_port.side_effect = (
[n_exc.PortNotFoundClient])
self.helper.delete_vip_port('foo')
self.helper.delete_port('foo')
@mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
'_find_ovn_lbs')
@@ -3274,7 +3274,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.helper._update_status_to_octavia.assert_not_called()
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test_create_vip_port_vip_neutron_client_other_exception(
self, del_port, net_cli):
net_cli.return_value.create_port.side_effect = [
@@ -3715,8 +3715,11 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.ovn_hm.uuid,
('options', options))
def test_hm_delete(self):
@mock.patch.object(ovn_helper.OvnProviderHelper, '_clean_up_hm_port')
def test_hm_delete(self, del_hm_port):
self._get_pool_listeners.stop()
pool_key = 'pool_%s' % self.pool_id
self.ovn_hm_lb.external_ids[pool_key] = self.member_line
self.helper.ovn_nbdb_api.db_list_rows.return_value.\
execute.side_effect = [[self.ovn_hm_lb], [self.ovn_hm]]
status = self.helper.hm_delete(self.health_monitor)
@@ -3738,6 +3741,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.ovn_hm.uuid)]
expected_destroy_calls = [
mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid)]
del_hm_port.assert_called_once_with(self.member_subnet_id)
self.helper.ovn_nbdb_api.db_clear.assert_has_calls(
expected_clear_calls)
self.helper.ovn_nbdb_api.db_remove.assert_has_calls(
@@ -3974,6 +3978,158 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
ovn_const.OVN_MEMBER_STATUS_KEY] = member_status
return member
def test__create_hm_port(self):
expected_dict = {
'port': {'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
'network_id': self.vip_dict['vip_network_id'],
'fixed_ips': [{'subnet_id':
self.vip_dict['vip_subnet_id']}],
'admin_state_up': True,
'port_security_enabled': False,
'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED,
'project_id': self.project_id
}}
with mock.patch.object(clients, 'get_neutron_client') as net_cli:
hm_port = self.helper._create_hm_port(
self.vip_dict['vip_network_id'],
self.vip_dict['vip_subnet_id'],
self.project_id)
expected_call = [
mock.call().create_port(expected_dict)]
net_cli.assert_has_calls(expected_call)
self.assertIsNotNone(hm_port)
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
def test__create_hm_port_neutron_client_exception(
self, net_cli):
net_cli.return_value.create_port.side_effect = [
n_exc.NeutronClientException]
net_cli.return_value.list_ports.return_value = {
'ports': []}
expected_dict = {
'port': {'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
'network_id': self.vip_dict['vip_network_id'],
'fixed_ips': [{'subnet_id':
self.vip_dict['vip_subnet_id']}],
'admin_state_up': True,
'port_security_enabled': False,
'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED,
'project_id': self.project_id
}}
hm_port = self.helper._create_hm_port(
self.vip_dict['vip_network_id'],
self.vip_dict['vip_subnet_id'],
self.project_id)
expected_call = [
mock.call(),
mock.call().create_port(expected_dict),
mock.call(),
mock.call().list_ports(
name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']))]
net_cli.assert_has_calls(expected_call)
self.assertIsNone(hm_port)
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
@mock.patch.object(ovn_helper.OvnProviderHelper, '_clean_up_hm_port')
def test__create_hm_port_neutron_client_exception_clean_up_hm_port(
self, del_hm_port, net_cli):
net_cli.return_value.create_port.side_effect = [
n_exc.NeutronClientException]
net_cli.return_value.list_ports.return_value = {
'ports': [
{'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
'id': 'fake_uuid'}]}
expected_dict = {
'port': {'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
'network_id': self.vip_dict['vip_network_id'],
'fixed_ips': [{
'subnet_id': self.vip_dict['vip_subnet_id']}],
'admin_state_up': True,
'port_security_enabled': False,
'device_owner': n_const.DEVICE_OWNER_DISTRIBUTED,
'project_id': self.project_id
}}
hm_port = self.helper._create_hm_port(
self.vip_dict['vip_network_id'],
self.vip_dict['vip_subnet_id'],
self.project_id)
expected_call = [
mock.call(),
mock.call().create_port(expected_dict)]
net_cli.assert_has_calls(expected_call)
del_hm_port.assert_called_once_with(self.vip_dict['vip_subnet_id'])
self.assertIsNone(hm_port)
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test__clean_up_hm_port(self, del_port, net_cli):
net_cli.return_value.list_ports.return_value = {
'ports': [
{'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
'id': 'fake_uuid',
'fixed_ips': [{'subnet_id': 'another_subnet_id',
'ip_address': '10.1.2.3'},
{'subnet_id': self.vip_dict['vip_subnet_id'],
'ip_address': '10.0.0.3'}]}]}
self.helper._clean_up_hm_port(self.vip_dict['vip_subnet_id'])
expected_call = [
mock.call(),
mock.call().list_ports(
name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']))]
net_cli.assert_has_calls(expected_call)
del_port.assert_called_once_with('fake_uuid')
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test__clean_up_hm_port_in_use(self, del_port, net_cli):
net_cli.return_value.list_ports.return_value = {
'ports': [
{'name': '%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']),
'id': 'fake_uuid',
'fixed_ips': [{'subnet_id': 'another_subnet_id',
'ip_address': '10.1.2.3'},
{'subnet_id': self.vip_dict['vip_subnet_id'],
'ip_address': '10.0.0.3'}]}]}
fake_lb_unrelated = fakes.FakeOvsdbRow.create_one_ovsdb_row(
attrs={
'ip_port_mappings': {'10.1.2.4': 'fake_member_lgp:10.1.2.3'}})
fake_lb_hm_port_in_use = fakes.FakeOvsdbRow.create_one_ovsdb_row(
attrs={
'ip_port_mappings': {'10.1.2.4': 'fake_member_lgp:10.1.2.3',
'10.0.0.4': 'fake_member_lgp:10.0.0.3'}})
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
execute.return_value = [fake_lb_unrelated, fake_lb_hm_port_in_use]
self.helper._clean_up_hm_port(self.vip_dict['vip_subnet_id'])
expected_call = [
mock.call(),
mock.call().list_ports(
name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']))]
net_cli.assert_has_calls(expected_call)
del_port.assert_not_called()
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
@mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_port')
def test__clean_up_hm_port_not_found(self, del_port, net_cli):
net_cli.return_value.list_ports.return_value = {
'ports': []}
self.helper._clean_up_hm_port(self.vip_dict['vip_subnet_id'])
expected_call = [
mock.call(),
mock.call().list_ports(
name='%s%s' % (ovn_const.LB_HM_PORT_PREFIX,
self.vip_dict['vip_subnet_id']))]
net_cli.assert_has_calls(expected_call)
del_port.assert_not_called()
def test_hm_update_status_offline(self):
fake_subnet = fakes.FakeSubnet.create_one_subnet()
member = self._add_member(self.ovn_hm_lb, fake_subnet, 8080)