Use ovsdbapp commands to add/del backends to ip_port_mappings
Previously, the LB ip_port_mappings field was rebuilt from scratch
after any operation related to the LB health monitor (HM): every
member was removed and re-added in two steps, a db_clear followed
by a db_set.

This patch switches to the ovsdbapp commands specific to adding and
deleting backends in ip_port_mappings, which is more appropriate and
avoids OVN DB operations unrelated to the member being added or
deleted. It also handles the case where the same backend_ip is
referenced by another member under a different HM.

ovsdbapp is bumped to 2.1.0 to make these new commands available [1]
[1] f3c5da5402
Closes-Bug: 2007835
Change-Id: I5705c490bcd36e7e2edcc62954a3ffa0ff645519
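
For context, this is the shape of the change: instead of rebuilding the
whole ip_port_mappings column, the provider now issues per-backend
commands. A minimal sketch, assuming an ovsdbapp OVN NB API handle
nb_api and illustrative values (not the patch itself):

    # before: rebuild everything on any member change
    # nb_api.db_clear('Load_Balancer', lb_uuid, 'ip_port_mappings')
    # nb_api.db_set('Load_Balancer', lb_uuid,
    #               ('ip_port_mappings', all_mappings))

    # after (ovsdbapp >= 2.1.0): touch only the affected backend
    nb_api.lb_add_ip_port_mapping(lb_uuid, '10.0.0.10',
                                  'lsp-member-port', '10.0.0.2').execute()
    nb_api.lb_del_ip_port_mapping(lb_uuid, '10.0.0.10').execute()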
parent 1878eb4c21
commit e40e0d62ac
@@ -425,7 +425,7 @@ class OvnProviderDriver(driver_base.ProviderDriver):
         for member in members_to_delete:
             member_info = member.split('_')
-            member_ip, member_port, subnet_id = (
+            member_ip, member_port, subnet_id, member_id = (
                 self._ovn_helper._extract_member_info(member)[0])
             request_info = {'id': member_info[1],
                             'address': member_ip,
@@ -842,10 +842,11 @@ class OvnProviderHelper():
         if member:
             for mem in member.split(','):
                 mem_split = mem.split('_')
+                mem_id = mem_split[1]
                 mem_ip_port = mem_split[2]
                 mem_ip, mem_port = mem_ip_port.rsplit(':', 1)
                 mem_subnet = mem_split[3]
-                mem_info.append((mem_ip, mem_port, mem_subnet))
+                mem_info.append((mem_ip, mem_port, mem_subnet, mem_id))
         return mem_info

     def _get_member_info(self, member):
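
As a side note on the hunk above: each pool key in external_ids stores
members as a comma-separated string of the form
member_<id>_<ip>:<port>_<subnet_id>, and _extract_member_info() now also
returns the member id. A standalone sketch with illustrative values:

    member = 'member_uuid1_10.0.0.10:80_subnet1'
    mem_split = member.split('_')   # ['member', 'uuid1', '10.0.0.10:80', 'subnet1']
    mem_id = mem_split[1]           # 'uuid1'
    mem_ip, mem_port = mem_split[2].rsplit(':', 1)  # '10.0.0.10', '80'
    mem_subnet = mem_split[3]       # 'subnet1'
    # new tuple shape: ('10.0.0.10', '80', 'subnet1', 'uuid1')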
@@ -922,12 +923,12 @@ class OvnProviderHelper():
                 continue

             ips = []
-            for member_ip, member_port, subnet in self._extract_member_info(
+            for mb_ip, mb_port, mb_subnet, mb_id in self._extract_member_info(
                     lb_external_ids[pool_id]):
-                if netaddr.IPNetwork(member_ip).version == 6:
-                    ips.append(f'[{member_ip}]:{member_port}')
+                if netaddr.IPNetwork(mb_ip).version == 6:
+                    ips.append(f'[{mb_ip}]:{mb_port}')
                 else:
-                    ips.append(f'{member_ip}:{member_port}')
+                    ips.append(f'{mb_ip}:{mb_port}')

             if netaddr.IPNetwork(lb_vip).version == 6:
                 lb_vip = f'[{lb_vip}]'
@@ -1941,7 +1942,9 @@ class OvnProviderHelper():
         operating_status = constants.NO_MONITOR
         if new_member and ovn_lb.health_check:
             operating_status = constants.ONLINE
-            if not self._update_hm_members(ovn_lb, pool_key):
+            mb_ip, mb_port, mb_subnet, mb_id = self._extract_member_info(
+                new_member)[0]
+            if not self._update_hm_member(ovn_lb, pool_key, mb_ip):
                 operating_status = constants.ERROR
         member_status[constants.OPERATING_STATUS] = operating_status

@@ -1957,6 +1960,13 @@ class OvnProviderHelper():
         existing_members = external_ids[pool_key].split(",")
         member_info = self._get_member_info(member)
         if member_info in existing_members:
+
+            if ovn_lb.health_check:
+                self._update_hm_member(ovn_lb,
+                                       pool_key,
+                                       member.get(constants.ADDRESS),
+                                       delete=True)
+
             commands = []
             existing_members.remove(member_info)

@@ -1992,14 +2002,13 @@ class OvnProviderHelper():
         pool = {constants.ID: member[constants.POOL_ID],
                 constants.PROVISIONING_STATUS: constants.ACTIVE,
                 constants.OPERATING_STATUS: pool_status}
-        if ovn_lb.health_check:
-            self._update_hm_members(ovn_lb, pool_key)
-            if pool_status == constants.OFFLINE:
-                self._clean_up_hm_port(member['subnet_id'])
+        if ovn_lb.health_check and pool_status == constants.OFFLINE:
+            # NOTE(froyo): if the pool status is OFFLINE there are no more
+            # members. So we should ensure the hm-port is deleted if no
+            # more LB are using it. We need to do this call after the
+            # cleaning of the ip_port_mappings for the ovn LB.
+            self._clean_up_hm_port(member[constants.SUBNET_ID])

         status = {
             constants.POOLS: [pool],
             constants.MEMBERS: [
@@ -2492,86 +2501,114 @@ class OvnProviderHelper():
         self._execute_commands(commands)
         return True

-    def _update_hm_members(self, ovn_lb, pool_key):
-        mappings = {}
-        # For each member, set it's HM
-        for member_ip, member_port, member_subnet in self._extract_member_info(
-                ovn_lb.external_ids[pool_key]):
-            member_lsp = self._get_member_lsp(member_ip, member_subnet)
-            if not member_lsp:
-                # NOTE(froyo): In order to continue evaluating the rest of
-                # the members, we just warn about the member issue,
-                # assuming that it will be in OFFLINE status as soon as the
-                # HM does the first evaluation.
-                LOG.error("Member %(member)s Logical_Switch_Port not found, "
-                          "when creating a Health Monitor for pool %(pool)s.",
-                          {'member': member_ip, 'pool': pool_key})
-                continue
-            network_id = member_lsp.external_ids.get(
-                ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY).split('neutron-')[1]
-            project_id = member_lsp.external_ids.get(
-                ovn_const.OVN_PROJECT_EXT_ID_KEY)
-            hm_port = self._ensure_hm_ovn_port(
-                network_id, member_subnet, project_id)
-            if not hm_port:
-                LOG.error("No port on network %(network)s available for "
-                          "health monitoring. Cannot create a Health Monitor "
-                          "for pool %(pool)s.",
-                          {'network': network_id,
-                           'pool': pool_key})
-                return False
-            hm_source_ip = None
-            for fixed_ip in hm_port['fixed_ips']:
-                if fixed_ip['subnet_id'] == member_subnet:
-                    hm_source_ip = fixed_ip['ip_address']
-                    break
-            if not hm_source_ip:
-                LOG.error("No port on subnet %(subnet)s available for "
-                          "health monitoring member IP %(member)s. Cannot "
-                          "create a Health Monitor for pool %(pool)s.",
-                          {'subnet': member_subnet,
-                           'member': member_ip,
-                           'pool': pool_key})
-                return False
-            # ovn-nbctl set load_balancer ${OVN_LB_ID}
-            # ip_port_mappings:${MEMBER_IP}=${LSP_NAME_MEMBER}:${HEALTH_SRC}
-            # where:
-            # OVN_LB_ID: id of LB
-            # MEMBER_IP: IP of member_lsp
-            # HEALTH_SRC: source IP of hm_port
-
-            # need output like this
-            # vips: {"172.24.4.246:80"="10.0.0.10:80"}
-            # ip_port_mappings: {"10.0.0.10"="ID:10.0.0.2"}
-            # OVN does not support IPv6 Health Checks, but we check anyways
-            member_src = f'{member_lsp.name}:'
-            if netaddr.IPNetwork(hm_source_ip).version == 6:
-                member_src += f'[{hm_source_ip}]'
-            else:
-                member_src += f'{hm_source_ip}'
-            if netaddr.IPNetwork(member_ip).version == 6:
-                member_ip = f'[{member_ip}]'
-            mappings[member_ip] = member_src
-
-        commands = []
-        # NOTE(froyo): This db_clear over field ip_port_mappings is needed just
-        # to clean the old values (including the removed member) and the
-        # following db_set will update the using the mappings calculated some
-        # lines above with reemaining members only.
-        # TODO(froyo): use the ovsdbapp commands to add/del members to
-        # ip_port_mappings field
-        commands.append(
-            self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb.uuid,
-                                       'ip_port_mappings'))
-        if mappings:
-            commands.append(
-                self.ovn_nbdb_api.db_set(
-                    'Load_Balancer', ovn_lb.uuid,
-                    ('ip_port_mappings', mappings)))
-        self._execute_commands(commands)
-        return True
+    def _update_ip_port_mappings(self, ovn_lb, backend_ip, port_name, src_ip,
+                                 delete=False):
+
+        # ip_port_mappings:${MEMBER_IP}=${LSP_NAME_MEMBER}:${HEALTH_SRC}
+        # where:
+        # MEMBER_IP: IP of member_lsp
+        # LSP_NAME_MEMBER: Logical switch port
+        # HEALTH_SRC: source IP of hm_port
+
+        # ip_port_mappings: {"MEMBER_IP"="LSP_NAME_MEMBER:HEALTH_SRC"}
+        if delete:
+            self.ovn_nbdb_api.lb_del_ip_port_mapping(ovn_lb.uuid,
+                                                     backend_ip).execute()
+        else:
+            self.ovn_nbdb_api.lb_add_ip_port_mapping(ovn_lb.uuid,
+                                                     backend_ip,
+                                                     port_name,
+                                                     src_ip).execute()
+
+    def _clean_ip_port_mappings(self, ovn_lb, pool_key=None):
+        if not pool_key:
+            self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb.uuid,
+                                       'ip_port_mappings').execute()
+        else:
+            # NOTE(froyo): before removing a member from the ip_port_mappings
+            # list, we need to ensure that the member is not being monitored by
+            # any other existing HM. To prevent accidentally removing the
+            # member we can use the neutron:member_status to search for any
+            # other members with the same address
+            members_try_remove = self._extract_member_info(
+                ovn_lb.external_ids[pool_key])
+            other_members = []
+            for k, v in ovn_lb.external_ids.items():
+                if ovn_const.LB_EXT_IDS_POOL_PREFIX in k and k != pool_key:
+                    other_members.extend(self._extract_member_info(
+                        ovn_lb.external_ids[k]))
+
+            member_statuses = ovn_lb.external_ids.get(
+                ovn_const.OVN_MEMBER_STATUS_KEY)
+
+            try:
+                member_statuses = jsonutils.loads(member_statuses)
+            except TypeError:
+                LOG.debug("no member status on external_ids: %s",
+                          str(member_statuses))
+                member_statuses = {}
+
+            for (mb_ip, mb_port, mb_subnet, mb_id) in members_try_remove:
+                delete = True
+                for member_id in [item[3] for item in other_members
+                                  if item[0] == mb_ip]:
+                    if member_statuses.get(
+                            member_id, '') != constants.NO_MONITOR:
+                        # same address being monitorized by another HM
+                        delete = False
+
+                if delete:
+                    self.ovn_nbdb_api.lb_del_ip_port_mapping(
+                        ovn_lb.uuid, mb_ip).execute()
+
+    def _update_hm_member(self, ovn_lb, pool_key, backend_ip, delete=False):
+        # Update just the backend_ip member
+        for mb_ip, mb_port, mb_subnet, mb_id in self._extract_member_info(
+                ovn_lb.external_ids[pool_key]):
+            member_lsp = self._get_member_lsp(mb_ip, mb_subnet)
+            if mb_ip == backend_ip:
+                if not member_lsp:
+                    # NOTE(froyo): In order to continue evaluating the rest of
+                    # the members, we just warn about the member issue,
+                    # assuming that it will be in OFFLINE status as soon as the
+                    # HM does the first evaluation.
+                    LOG.error("Member %(member)s Logical_Switch_Port not "
+                              "found, when creating a Health Monitor for "
+                              "pool %(pool)s.",
+                              {'member': mb_ip, 'pool': pool_key})
+                    break
+
+                network_id = member_lsp.external_ids.get(
+                    ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY).split('neutron-')[1]
+                project_id = member_lsp.external_ids.get(
+                    ovn_const.OVN_PROJECT_EXT_ID_KEY)
+                hm_port = self._ensure_hm_ovn_port(
+                    network_id, mb_subnet, project_id)
+                if not hm_port:
+                    LOG.error("No port on network %(network)s available for "
+                              "health monitoring. Cannot find a Health "
+                              "Monitor for pool %(pool)s.",
+                              {'network': network_id, 'pool': pool_key})
+                    return False
+                hm_source_ip = None
+                for fixed_ip in hm_port['fixed_ips']:
+                    if fixed_ip['subnet_id'] == mb_subnet:
+                        hm_source_ip = fixed_ip['ip_address']
+                        break
+                if not hm_source_ip:
+                    LOG.error("No port on subnet %(subnet)s available for "
+                              "health monitoring member IP %(member)s. Cannot "
+                              "find a Health Monitor for pool %(pool)s.",
+                              {'subnet': mb_subnet,
+                               'member': mb_ip,
+                               'pool': pool_key})
+                    return False
+                self._update_ip_port_mappings(ovn_lb, backend_ip,
+                                              member_lsp.name, hm_source_ip,
+                                              delete)
+                return True
+
+        # NOTE(froyo): If the backend is not located or just one member but not
+        # found the lsp
+        return True

     def _lookup_lbhcs_by_hm_id(self, hm_id):
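
To illustrate the shared-backend guard added in _clean_ip_port_mappings()
above: a mapping is only removed when no member of another pool with the
same address is still being monitored. A condensed sketch with
illustrative values mirroring the unit tests further below:

    # pool_1's 'address1' is shared with pool_2's member 'uuid3'
    other_members = [('address1', 'port3', 'subnet1', 'uuid3')]
    member_statuses = {'uuid3': 'ONLINE'}

    delete = True
    for member_id in [m[3] for m in other_members if m[0] == 'address1']:
        if member_statuses.get(member_id, '') != 'NO_MONITOR':
            delete = False  # another HM still monitors this address
    # delete is False, so the mapping for 'address1' is kept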
@@ -2658,9 +2695,13 @@ class OvnProviderHelper():
         # from info object passed-in
         hm_status = self._add_lbhc(ovn_lb, pool_key, info)
         if hm_status[constants.PROVISIONING_STATUS] == constants.ACTIVE:
-            if not self._update_hm_members(ovn_lb, pool_key):
-                hm_status[constants.PROVISIONING_STATUS] = constants.ERROR
-                hm_status[constants.OPERATING_STATUS] = constants.ERROR
+            for mb_ip, mb_port, mb_subnet, mb_id in self._extract_member_info(
+                    ovn_lb.external_ids[pool_key]):
+                if not self._update_hm_member(ovn_lb, pool_key, mb_ip):
+                    hm_status[constants.PROVISIONING_STATUS] = constants.ERROR
+                    hm_status[constants.OPERATING_STATUS] = constants.ERROR
+                    self._clean_ip_port_mappings(ovn_lb, pool_key)
+                    break
         status[constants.HEALTHMONITORS] = [hm_status]
         return status

@@ -2748,7 +2789,8 @@ class OvnProviderHelper():
             if ovn_const.LB_EXT_IDS_POOL_PREFIX in k:
                 members = self._extract_member_info(ovn_lb.external_ids[k])
                 member_subnets = list(
-                    set([mem_subnet for (_, _, mem_subnet) in members])
+                    set([mb_subnet
+                         for (mb_ip, mb_port, mb_subnet, mb_id) in members])
                 )
                 pool_id = k.split('_')[1]
                 pool_listeners = self._get_pool_listeners(
@@ -2769,10 +2811,11 @@ class OvnProviderHelper():
             hms_key = jsonutils.loads(hms_key)
             if hm_id in hms_key:
                 hms_key.remove(hm_id)

+        self._clean_ip_port_mappings(ovn_lb, ovn_const.LB_EXT_IDS_POOL_PREFIX +
+                                     str(pool_id_related))
+
         commands = []
-        commands.append(
-            self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb.uuid,
-                                       'ip_port_mappings'))
         for lbhc in lbhcs:
             commands.append(
                 self.ovn_nbdb_api.db_remove('Load_Balancer', ovn_lb.uuid,
@@ -3005,17 +3048,15 @@ class OvnProviderHelper():
             if ovn_const.LB_EXT_IDS_POOL_PREFIX not in k:
                 continue
             for (
-                member_ip,
-                member_port,
-                subnet,
+                mb_ip, mb_port, mb_subnet, mb_id,
             ) in self._extract_member_info(v):
-                if info['ip'] != member_ip:
+                if info['ip'] != mb_ip:
                     continue
-                if info['port'] != member_port:
+                if info['port'] != mb_port:
                     continue
                 # match
                 member_id = [mb.split('_')[1] for mb in v.split(',')
-                             if member_ip in mb and member_port in mb][0]
+                             if mb_ip in mb and mb_port in mb][0]
                 break

             # found it in inner loop
@@ -224,6 +224,128 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
         (self.helper.ovn_nbdb_api.ls_get.return_value.
          execute.return_value) = self.network

+    def test__update_hm_member_no_members(self):
+        pool_key = 'pool_%s' % self.pool_id
+        self.ovn_lb.external_ids[pool_key] = ''
+        self.assertTrue(
+            self.helper._update_hm_member(self.ovn_lb,
+                                          pool_key,
+                                          '10.0.0.4'))
+
+    def test__update_hm_member_backend_ip_not_match(self):
+        pool_key = 'pool_%s' % self.pool_id
+        self.ovn_lb.external_ids[pool_key] = self.member_line
+        with mock.patch.object(ovn_helper.OvnProviderHelper,
+                               '_get_member_lsp'):
+            self.assertTrue(
+                self.helper._update_hm_member(self.ovn_lb,
+                                              pool_key,
+                                              '10.0.0.4'))
+
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_ensure_hm_ovn_port')
+    def test__update_hm_member_hm_port_multiple_ip(self, ensure_hm_port):
+        hm_port = {
+            'fixed_ips': [{
+                'subnet_id': 'ipv6_foo',
+                'ip_address': '2001:db8::199'}, {
+                'subnet_id': self.member_subnet_id,
+                'ip_address': '10.0.0.4'}]}
+        ensure_hm_port.return_value = hm_port
+        pool_key = 'pool_%s' % self.pool_id
+        with mock.patch.object(ovn_helper.OvnProviderHelper,
+                               '_get_member_lsp'):
+            self.assertTrue(
+                self.helper._update_hm_member(self.ovn_lb,
+                                              pool_key,
+                                              self.member_address))
+
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_ensure_hm_ovn_port')
+    def test__update_hm_member_hm_port_not_found(self, ensure_hm_port):
+        ensure_hm_port.return_value = None
+        pool_key = 'pool_%s' % self.pool_id
+        with mock.patch.object(ovn_helper.OvnProviderHelper,
+                               '_get_member_lsp'):
+            self.assertFalse(
+                self.helper._update_hm_member(self.ovn_lb,
+                                              pool_key,
+                                              self.member_address))
+
+    def test__clean_ip_port_mappings(self):
+        self.helper._clean_ip_port_mappings(self.ovn_hm_lb)
+        self.helper.ovn_nbdb_api.db_clear.assert_called_once_with(
+            'Load_Balancer', self.ovn_hm_lb.uuid, 'ip_port_mappings')
+
+    def test__clean_ip_port_mappings_two_hm_pools_sharing_members(self):
+        self.member_line_pool1 = 'member_uuid1_address1:port1_subnet1, \
+            member_uuid2_address2:port2_subnet1'
+        self.member_line_pool2 = 'member_uuid3_address1:port3_subnet1, \
+            member_uuid4_address4:port4_subnet1'
+        self.ovn_hm_lb.external_ids['pool_1'] = self.member_line_pool1
+        self.ovn_hm_lb.external_ids['pool_2'] = self.member_line_pool2
+        self.ovn_hm_lb.external_ids[ovn_const.OVN_MEMBER_STATUS_KEY] = \
+            '{"uuid1": "ONLINE", "uuid2": "ONLINE", \
+            "uuid3": "ONLINE", "uuid4": "ONLINE"}'
+        self.helper._clean_ip_port_mappings(self.ovn_hm_lb, 'pool_1')
+        self.helper.ovn_nbdb_api.db_clear.assert_not_called()
+        self.helper.ovn_nbdb_api.lb_del_ip_port_mapping.\
+            assert_called_once_with(self.ovn_hm_lb.uuid, 'address2')
+
+    def test__clean_ip_port_mappings_one_hm_pools_sharing_members(self):
+        self.member_line_pool1 = 'member_uuid1_address1:port1_subnet1, \
+            member_uuid2_address2:port2_subnet1'
+        self.member_line_pool2 = 'member_uuid3_address1:port3_subnet1, \
+            member_uuid4_address2:port4_subnet1'
+        self.ovn_hm_lb.external_ids['pool_1'] = self.member_line_pool1
+        self.ovn_hm_lb.external_ids['pool_2'] = self.member_line_pool2
+        self.ovn_hm_lb.external_ids[ovn_const.OVN_MEMBER_STATUS_KEY] = \
+            '{"uuid1": "ONLINE", "uuid2": "ONLINE", \
+            "uuid3": "NO_MONITOR", "uuid4": "NO_MONITOR"}'
+        self.helper._clean_ip_port_mappings(self.ovn_hm_lb, 'pool_1')
+        self.helper.ovn_nbdb_api.db_clear.assert_not_called()
+        self.helper.ovn_nbdb_api.lb_del_ip_port_mapping.\
+            assert_has_calls([mock.call(self.ovn_hm_lb.uuid, 'address1'),
+                              mock.ANY,
+                              mock.call(self.ovn_hm_lb.uuid, 'address2'),
+                              mock.ANY])
+
+    def test__clean_ip_port_mappings_two_hm_pools_not_sharing_members(self):
+        self.member_line_pool1 = 'member_uuid1_address1:port1_subnet1, \
+            member_uuid2_address2:port2_subnet1'
+        self.member_line_pool2 = 'member_uuid3_address3:port3_subnet1, \
+            member_uuid4_address4:port4_subnet1'
+        self.ovn_hm_lb.external_ids['pool_1'] = self.member_line_pool1
+        self.ovn_hm_lb.external_ids['pool_2'] = self.member_line_pool2
+        self.ovn_hm_lb.external_ids[ovn_const.OVN_MEMBER_STATUS_KEY] = \
+            '{"uuid1": "ONLINE", "uuid2": "ONLINE", \
+            "uuid3": "ONLINE", "uuid4": "ONLINE"}'
+        self.helper._clean_ip_port_mappings(self.ovn_hm_lb, 'pool_1')
+        self.helper.ovn_nbdb_api.db_clear.assert_not_called()
+        self.helper.ovn_nbdb_api.lb_del_ip_port_mapping.\
+            assert_has_calls([mock.call(self.ovn_hm_lb.uuid, 'address1'),
+                              mock.ANY,
+                              mock.call(self.ovn_hm_lb.uuid, 'address2'),
+                              mock.ANY])
+
+    def test__update_ip_port_mappings(self):
+        src_ip = '10.22.33.4'
+        fakes.FakeOvsdbRow.create_one_ovsdb_row(
+            attrs={'ip': self.member_address,
+                   'logical_port': 'a-logical-port',
+                   'src_ip': src_ip,
+                   'port': self.member_port,
+                   'protocol': self.ovn_hm_lb.protocol,
+                   'status': ovn_const.HM_EVENT_MEMBER_PORT_ONLINE})
+        self.helper._update_ip_port_mappings(
+            self.ovn_lb, self.member_address, 'a-logical-port', src_ip)
+        self.helper.ovn_nbdb_api.lb_add_ip_port_mapping.\
+            assert_called_once_with(self.ovn_lb.uuid, self.member_address,
+                                    'a-logical-port', src_ip)
+        self.helper._update_ip_port_mappings(
+            self.ovn_lb, self.member_address, 'a-logical-port', src_ip,
+            delete=True)
+        self.helper.ovn_nbdb_api.lb_del_ip_port_mapping.\
+            assert_called_once_with(self.ovn_lb.uuid, self.member_address)
+
     def test__update_external_ids_member_status(self):
         self.helper._update_external_ids_member_status(
             self.ovn_lb, self.member_id, constants.NO_MONITOR)
@@ -1865,6 +1987,20 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
         self.assertEqual(status['pools'][0]['provisioning_status'],
                          constants.ACTIVE)

+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_hm_member')
+    def test_member_delete_hm(self, uhm, folbpi):
+        pool_key = 'pool_%s' % self.pool_id
+        self.ovn_hm_lb.external_ids[pool_key] = self.member_line
+        self.ovn_hm_lb.external_ids[ovn_const.OVN_MEMBER_STATUS_KEY] = \
+            '{"%s": "%s"}' % (self.member_id, constants.ONLINE)
+        folbpi.return_value = (pool_key, self.ovn_hm_lb)
+        self.helper.member_delete(self.member)
+        uhm.assert_called_once_with(self.ovn_hm_lb,
+                                    pool_key,
+                                    self.member_address,
+                                    delete=True)
+
     def test_member_delete_none(self):
         self.ovn_lb.external_ids.update({'pool_' + self.pool_id: ''})
         status = self.helper.member_delete(self.member)
@@ -3496,7 +3632,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
         self.assertFalse(ret)

     @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
-    @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_hm_members')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_hm_member')
     @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id')
     def _test_hm_create(self, protocol, members, fip, folbpi, uhm,
                         net_cli):
@@ -3913,9 +4049,6 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
                          constants.ACTIVE)
         self.assertEqual(status['listeners'][0]['provisioning_status'],
                          constants.ACTIVE)
-        expected_clear_calls = [
-            mock.call('Load_Balancer', self.ovn_hm_lb.uuid,
-                      'ip_port_mappings')]
         expected_remove_calls = [
             mock.call('Load_Balancer', self.ovn_hm_lb.uuid, 'health_check',
                       self.ovn_hm.uuid),
@@ -3924,8 +4057,6 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
         expected_destroy_calls = [
             mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid)]
         del_hm_port.assert_called_once_with(self.member_subnet_id)
-        self.helper.ovn_nbdb_api.db_clear.assert_has_calls(
-            expected_clear_calls)
         self.helper.ovn_nbdb_api.db_remove.assert_has_calls(
             expected_remove_calls)
         self.helper.ovn_nbdb_api.db_destroy.assert_has_calls(
@@ -3949,17 +4080,14 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
                          constants.ACTIVE)
         self.assertEqual(status['listeners'][0]['provisioning_status'],
                          constants.ACTIVE)
-        expected_clear_calls = [
-            mock.call('Load_Balancer', self.ovn_hm_lb.uuid,
-                      'ip_port_mappings')]
         expected_remove_calls = [
             mock.call('Load_Balancer', self.ovn_hm_lb.uuid, 'health_check',
-                      self.ovn_hm.uuid)]
+                      self.ovn_hm.uuid),
+            mock.call('Load_Balancer', self.ovn_hm_lb.uuid,
+                      'external_ids', ovn_const.LB_EXT_IDS_HMS_KEY)]
         expected_destroy_calls = [
             mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid)]
         del_hm_port.assert_not_called()
-        self.helper.ovn_nbdb_api.db_clear.assert_has_calls(
-            expected_clear_calls)
         self.helper.ovn_nbdb_api.db_remove.assert_has_calls(
             expected_remove_calls)
         self.helper.ovn_nbdb_api.db_destroy.assert_has_calls(
@@ -15,7 +15,7 @@ oslo.messaging>=12.4.0 # Apache-2.0
 oslo.serialization>=2.28.1 # Apache-2.0
 oslo.utils>=4.5.0 # Apache-2.0
 ovs>=2.10.0 # Apache-2.0
-ovsdbapp>=1.7.0 # Apache-2.0
+ovsdbapp>=2.1.0 # Apache-2.0
 pbr>=4.0.0 # Apache-2.0
 SQLAlchemy>=1.4.23 # MIT
 tenacity>=6.0.0 # Apache-2.0
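
A quick local check of the new minimum version (hypothetical shell
session, not part of the patch):

    $ pip install 'ovsdbapp>=2.1.0'
    $ python -c "import importlib.metadata as md; print(md.version('ovsdbapp'))"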