Fix port for Load Balancer Health Check for FIP
Currently, when a FIP is attached to an LB VIP after a HM has already been created, the LB_HC created for the new FIP does not include the port in the vip field. This way, when a member is in ERROR operating status, requests over the FIP are still distributed to the ERROR'ed members. This patch adds the port when the FIP is associated to the LB VIP. Related-Bug: #1997418 Change-Id: Iefe5d67b5a8fc47972b14c4247c381d625efcc09
This commit is contained in:
parent
ed02dba2bc
commit
ebfbd848b1
@ -39,6 +39,8 @@ LB_EXT_IDS_POOL_PREFIX = 'pool_'
|
||||
LB_EXT_IDS_LISTENER_PREFIX = 'listener_'
|
||||
LB_EXT_IDS_MEMBER_PREFIX = 'member_'
|
||||
LB_EXT_IDS_HM_KEY = 'octavia:healthmonitor'
|
||||
LB_EXT_IDS_HM_POOL_KEY = 'octavia:pool_id'
|
||||
LB_EXT_IDS_HM_VIP = 'octavia:vip'
|
||||
LB_EXT_IDS_HMS_KEY = 'octavia:healthmonitors'
|
||||
LB_EXT_IDS_VIP_KEY = 'neutron:vip'
|
||||
LB_EXT_IDS_VIP_FIP_KEY = 'neutron:vip_fip'
|
||||
|
@ -1415,10 +1415,14 @@ class OvnProviderHelper():
|
||||
if not listener.get(constants.ADMIN_STATE_UP, True):
|
||||
operating_status = constants.OFFLINE
|
||||
|
||||
if (ovn_lb.health_check and
|
||||
not self._update_lbhc_vip(ovn_lb,
|
||||
listener[constants.PROTOCOL_PORT])):
|
||||
operating_status = constants.ERROR
|
||||
if pool_key:
|
||||
for lb_hc in ovn_lb.health_check:
|
||||
if pool_key[len(ovn_const.LB_EXT_IDS_POOL_PREFIX):] == (
|
||||
lb_hc.external_ids.get(
|
||||
ovn_const.LB_EXT_IDS_HM_POOL_KEY)):
|
||||
if not self._update_lbhc_vip_port(
|
||||
lb_hc, listener[constants.PROTOCOL_PORT]):
|
||||
operating_status = constants.ERROR
|
||||
|
||||
status = {
|
||||
constants.LISTENERS: [
|
||||
@ -1585,8 +1589,6 @@ class OvnProviderHelper():
|
||||
constants.LOADBALANCERS: [
|
||||
{constants.ID: listener[constants.LOADBALANCER_ID],
|
||||
constants.PROVISIONING_STATUS: constants.ACTIVE}]}
|
||||
if ovn_lb.health_check:
|
||||
self._update_lbhc_vip(ovn_lb, listener[constants.PROTOCOL_PORT])
|
||||
return status
|
||||
|
||||
def pool_create(self, pool):
|
||||
@ -2273,6 +2275,16 @@ class OvnProviderHelper():
|
||||
"check Neutron logs. Perhaps port "
|
||||
"was already deleted.", port_id)
|
||||
|
||||
# NOTE(froyo): This could be removed in some cycles after Bobcat, this
|
||||
# check is created to ensure that the LB HC vip field is correctly formatted
|
||||
# IP:PORT
|
||||
def _check_lbhc_vip_format(self, vip):
|
||||
if vip:
|
||||
ip_port = vip.rsplit(':', 1)
|
||||
if len(ip_port) == 2 and ip_port[1].isdigit():
|
||||
return True
|
||||
return False
|
||||
|
||||
def handle_vip_fip(self, fip_info):
|
||||
ovn_lb = fip_info['ovn_lb']
|
||||
external_ids = copy.deepcopy(ovn_lb.external_ids)
|
||||
@ -2286,15 +2298,22 @@ class OvnProviderHelper():
|
||||
commands.append(
|
||||
self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
|
||||
('external_ids', vip_fip_info)))
|
||||
if ovn_lb.health_check:
|
||||
for lb_hc in ovn_lb.health_check:
|
||||
vip = fip_info['vip_fip']
|
||||
lb_hc_external_ids = copy.deepcopy(lb_hc.external_ids)
|
||||
lb_hc_external_ids[ovn_const.LB_EXT_IDS_HM_VIP] = vip
|
||||
if self._check_lbhc_vip_format(lb_hc.vip):
|
||||
port = lb_hc.vip.rsplit(':')[-1]
|
||||
vip += ':' + port
|
||||
else:
|
||||
vip = ''
|
||||
kwargs = {
|
||||
'vip': fip_info['vip_fip'],
|
||||
'options': ovn_lb.health_check[0].options,
|
||||
'external_ids': ovn_lb.health_check[0].external_ids}
|
||||
'vip': vip,
|
||||
'options': lb_hc.options,
|
||||
'external_ids': lb_hc_external_ids}
|
||||
with self.ovn_nbdb_api.transaction(check_error=True) as txn:
|
||||
fip_lbhc = txn.add(
|
||||
self.ovn_nbdb_api.db_create(
|
||||
'Load_Balancer_Health_Check', **kwargs))
|
||||
fip_lbhc = txn.add(self.ovn_nbdb_api.db_create(
|
||||
'Load_Balancer_Health_Check', **kwargs))
|
||||
txn.add(self.ovn_nbdb_api.db_add(
|
||||
'Load_Balancer', ovn_lb.uuid,
|
||||
'health_check', fip_lbhc))
|
||||
@ -2439,6 +2458,19 @@ class OvnProviderHelper():
|
||||
hm_id, ovn_lb.external_ids)
|
||||
return status
|
||||
vip_port = self._get_pool_listener_port(ovn_lb, pool_key)
|
||||
|
||||
# This is to enable lookups by Octavia DB ID value
|
||||
external_ids = {
|
||||
ovn_const.LB_EXT_IDS_HM_KEY: hm_id,
|
||||
ovn_const.LB_EXT_IDS_HM_VIP: vip,
|
||||
ovn_const.LB_EXT_IDS_HM_POOL_KEY: pool_key[
|
||||
len(ovn_const.LB_EXT_IDS_POOL_PREFIX):],
|
||||
}
|
||||
|
||||
if fip:
|
||||
external_ids_fip = copy.deepcopy(external_ids)
|
||||
external_ids_fip[ovn_const.LB_EXT_IDS_HM_VIP] = fip
|
||||
|
||||
if not vip_port:
|
||||
# This is not fatal as we can add it when a listener is created
|
||||
vip = []
|
||||
@ -2460,9 +2492,6 @@ class OvnProviderHelper():
|
||||
'success_count': str(info['success_count']),
|
||||
'failure_count': str(info['failure_count'])}
|
||||
|
||||
# This is to enable lookups by Octavia DB ID value
|
||||
external_ids = {ovn_const.LB_EXT_IDS_HM_KEY: hm_id}
|
||||
|
||||
# Just seems like this needs ovsdbapp support, see:
|
||||
# ovsdbapp/schema/ovn_northbound/impl_idl.py - lb_add()
|
||||
# ovsdbapp/schema/ovn_northbound/commands.py - LbAddCommand()
|
||||
@ -2475,7 +2504,7 @@ class OvnProviderHelper():
|
||||
fip_kwargs = {
|
||||
'vip': fip,
|
||||
'options': options,
|
||||
'external_ids': external_ids}
|
||||
'external_ids': external_ids_fip}
|
||||
|
||||
operating_status = constants.ONLINE
|
||||
if not info['admin_state_up']:
|
||||
@ -2514,60 +2543,20 @@ class OvnProviderHelper():
|
||||
LOG.exception(ovn_const.EXCEPTION_MSG, "set of health check")
|
||||
return status
|
||||
|
||||
def _update_lbhc_vip(self, ovn_lb, vip_port):
|
||||
vip = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_KEY)
|
||||
if not vip:
|
||||
LOG.error("Could not find VIP for LB external_ids: %s",
|
||||
ovn_lb.external_ids)
|
||||
return False
|
||||
|
||||
vip_version = netaddr.IPAddress(vip).version
|
||||
if vip_version == 6:
|
||||
vip_lbhc = [lbhc for lbhc in ovn_lb.health_check
|
||||
if lbhc.vip == [] or lbhc.vip[1:].split("]")[0] == vip]
|
||||
def _update_lbhc_vip_port(self, lbhc, vip_port):
|
||||
if lbhc.vip:
|
||||
vip = lbhc.vip.rsplit(":")[0] + ':' + str(vip_port)
|
||||
else:
|
||||
vip_lbhc = [lbhc for lbhc in ovn_lb.health_check
|
||||
if lbhc.vip == [] or lbhc.vip.split(":")[0] == vip]
|
||||
if not vip_lbhc:
|
||||
LOG.error("Could not find HC associated to VIP: %s", vip)
|
||||
return False
|
||||
|
||||
vip = vip + ':' + str(vip_port)
|
||||
fip = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
|
||||
create_fip_lbhc = False
|
||||
if fip:
|
||||
fip = fip + ':' + str(vip_port)
|
||||
if len(ovn_lb.health_check) != 2:
|
||||
LOG.warning("There should be two HCs associated to the Load "
|
||||
"Balancer %s as it has a FIP associated to it",
|
||||
ovn_lb.uuid)
|
||||
create_fip_lbhc = True
|
||||
|
||||
# If initially the lbhc was created with no port info, vip field
|
||||
# will be empty, so get it from lbhc external_ids
|
||||
vip = lbhc.external_ids.get(ovn_const.LB_EXT_IDS_HM_VIP, '')
|
||||
if vip:
|
||||
vip = vip + ':' + str(vip_port)
|
||||
commands = []
|
||||
commands.append(
|
||||
self.ovn_nbdb_api.db_set(
|
||||
'Load_Balancer_Health_Check', ovn_lb.health_check[0].uuid,
|
||||
'Load_Balancer_Health_Check', lbhc.uuid,
|
||||
('vip', vip)))
|
||||
if fip:
|
||||
if create_fip_lbhc:
|
||||
# For upgrades purposes we need to recover from this situation
|
||||
# and create the health_check for the FIP
|
||||
kwargs = {
|
||||
'vip': fip,
|
||||
'options': vip_lbhc[0].options,
|
||||
'external_ids': vip_lbhc[0].external_ids}
|
||||
with self.ovn_nbdb_api.transaction(check_error=True) as txn:
|
||||
fip_lbhc = txn.add(
|
||||
self.ovn_nbdb_api.db_create(
|
||||
'Load_Balancer_Health_Check', **kwargs))
|
||||
txn.add(self.ovn_nbdb_api.db_add(
|
||||
'Load_Balancer', ovn_lb.uuid,
|
||||
'health_check', fip_lbhc))
|
||||
else:
|
||||
commands.append(
|
||||
self.ovn_nbdb_api.db_set(
|
||||
'Load_Balancer_Health_Check',
|
||||
ovn_lb.health_check[1].uuid, ('vip', fip)))
|
||||
self._execute_commands(commands)
|
||||
return True
|
||||
|
||||
|
@ -108,7 +108,9 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
self.ovn_hm = mock.MagicMock()
|
||||
self.ovn_hm.uuid = self.healthmonitor_id
|
||||
self.ovn_hm.external_ids = {
|
||||
ovn_const.LB_EXT_IDS_HM_KEY: self.ovn_hm.uuid}
|
||||
ovn_const.LB_EXT_IDS_HM_KEY: self.ovn_hm.uuid,
|
||||
ovn_const.LB_EXT_IDS_HM_POOL_KEY: self.pool_id,
|
||||
ovn_const.LB_EXT_IDS_HM_VIP: '10.22.33.99'}
|
||||
self.ovn_hm_lb.health_check = [self.ovn_hm.uuid]
|
||||
self.member_line = (
|
||||
'member_%s_%s:%s_%s' %
|
||||
@ -537,6 +539,23 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
found = f(self.pool_id)
|
||||
self.assertEqual(found, (lb.vip_subnet_id, '10.22.33.0/24'))
|
||||
|
||||
def test__check_lbhc_vip_format(self):
    # Pairs of (candidate vip string, expected validity): only values
    # that end in an explicit ':PORT' suffix should be accepted, for
    # both IPv4 and bracketed IPv6 addresses.
    cases = (
        ("192.168.0.1:8080", True),
        ("192.168.0.1", False),
        ("[2001:db8:3333:4444:5555:6666:7777:8888]:8080", True),
        ("[2001:db8:3333:4444:5555:6666:7777:8888]", False),
        ("", False),
    )
    for vip, expected in cases:
        result = self.helper._check_lbhc_vip_format(vip)
        if expected:
            self.assertTrue(result)
        else:
            self.assertFalse(result)
|
||||
|
||||
def test__get_subnet_from_pool_lb_no_vip_subnet_id(self):
|
||||
f = self.helper._get_subnet_from_pool
|
||||
|
||||
@ -3209,14 +3228,6 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
|
||||
self.helper.handle_vip_fip(fip_info)
|
||||
|
||||
kwargs = {
|
||||
'vip': fip_info['vip_fip'],
|
||||
'options': lb.health_check[0].options,
|
||||
'external_ids': lb.health_check[0].external_ids}
|
||||
self.helper.ovn_nbdb_api.db_create.assert_called_once_with(
|
||||
'Load_Balancer_Health_Check', **kwargs)
|
||||
self.helper.ovn_nbdb_api.db_add.assert_called_once_with(
|
||||
'Load_Balancer', lb.uuid, 'health_check', mock.ANY)
|
||||
expected_db_set_calls = [
|
||||
mock.call('Load_Balancer', lb.uuid,
|
||||
('external_ids', {'neutron:vip_fip': '10.0.0.123'})),
|
||||
@ -3669,14 +3680,21 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
'timeout': '7',
|
||||
'failure_count': '5',
|
||||
'success_count': '3'}
|
||||
external_ids = {ovn_const.LB_EXT_IDS_HM_KEY: self.healthmonitor_id}
|
||||
external_ids = {
|
||||
ovn_const.LB_EXT_IDS_HM_KEY: self.healthmonitor_id,
|
||||
ovn_const.LB_EXT_IDS_HM_VIP:
|
||||
self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY],
|
||||
ovn_const.LB_EXT_IDS_HM_POOL_KEY: self.pool_id}
|
||||
kwargs = {'vip': vip,
|
||||
'options': options,
|
||||
'external_ids': external_ids}
|
||||
if fip:
|
||||
external_ids_fips = copy.deepcopy(external_ids)
|
||||
external_ids_fips[ovn_const.LB_EXT_IDS_HM_VIP] = (
|
||||
self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY])
|
||||
fip_kwargs = {'vip': fip,
|
||||
'options': options,
|
||||
'external_ids': external_ids}
|
||||
'external_ids': external_ids_fips}
|
||||
|
||||
expected_lbhc_calls = [
|
||||
mock.call('Load_Balancer_Health_Check', **kwargs)]
|
||||
@ -3877,28 +3895,29 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
constants.ERROR)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_get_or_create_ovn_lb')
|
||||
def test_hm_create_then_listener_create(self, get_ovn_lb):
|
||||
def test_hm_existing_lbhc_update_on_listener_create(self, get_ovn_lb):
|
||||
vip = (self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY] +
|
||||
':' + str(self.listener['protocol_port']))
|
||||
fip = (self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] +
|
||||
':' + str(self.listener['protocol_port']))
|
||||
self.ovn_hm.vip = []
|
||||
self.ovn_hm_lb.health_check = [self.ovn_hm]
|
||||
self.ovn_hm_fip = copy.deepcopy(self.ovn_hm)
|
||||
self.ovn_hm_fip.external_ids[ovn_const.LB_EXT_IDS_HM_VIP] = (
|
||||
self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY])
|
||||
self.ovn_hm_lb.health_check = [self.ovn_hm, self.ovn_hm_fip]
|
||||
get_ovn_lb.return_value = self.ovn_hm_lb
|
||||
self.listener['admin_state_up'] = True
|
||||
kwargs = {
|
||||
'vip': fip,
|
||||
'options': self.ovn_hm.options,
|
||||
'external_ids': self.ovn_hm.external_ids}
|
||||
|
||||
status = self.helper.listener_create(self.listener)
|
||||
|
||||
self.helper.ovn_nbdb_api.db_set.assert_called_with(
|
||||
'Load_Balancer_Health_Check', self.ovn_hm.uuid, ('vip', vip))
|
||||
self.helper.ovn_nbdb_api.db_create.assert_called_with(
|
||||
'Load_Balancer_Health_Check', **kwargs)
|
||||
self.helper.ovn_nbdb_api.db_add.assert_called_with(
|
||||
'Load_Balancer', self.ovn_hm_lb.uuid, 'health_check', mock.ANY)
|
||||
expected_set_external_ids_calls = [
|
||||
mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid,
|
||||
('vip', vip)),
|
||||
mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid,
|
||||
('vip', fip))]
|
||||
self.helper.ovn_nbdb_api.db_set.assert_has_calls(
|
||||
expected_set_external_ids_calls)
|
||||
|
||||
self.assertEqual(status['listeners'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['listeners'][0]['operating_status'],
|
||||
@ -3925,19 +3944,6 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
self.assertEqual(status['listeners'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_lookup_lbhcs_by_hm_id')
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_get_or_create_ovn_lb')
|
||||
def test_hm_create_then_listener_create_no_hm(self, get_ovn_lb, lookup_hm):
|
||||
get_ovn_lb.return_value = self.ovn_hm_lb
|
||||
lookup_hm.return_value = []
|
||||
self.ovn_hm_lb.health_check = [self.ovn_hm]
|
||||
self.listener['admin_state_up'] = True
|
||||
status = self.helper.listener_create(self.listener)
|
||||
self.assertEqual(status['listeners'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['listeners'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_lookup_lbhcs_by_hm_id')
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_get_or_create_ovn_lb')
|
||||
@ -3952,16 +3958,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
self.assertEqual(status['listeners'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['listeners'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_update_lbhc_vip')
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lbs')
|
||||
def test_hm_create_then_listener_update(self, find_ovn_lbs,
|
||||
update_lbhc_vip):
|
||||
find_ovn_lbs.return_value = self.ovn_hm_lb
|
||||
self.helper.listener_update(self.listener)
|
||||
update_lbhc_vip.assert_called_once_with(
|
||||
self.ovn_hm_lb, self.listener[constants.PROTOCOL_PORT])
|
||||
constants.ONLINE)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id')
|
||||
def test_hm_update(self, folbfhi):
|
||||
|
16
releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml
Normal file
16
releasenotes/notes/lbhc-fix-fip-05820d5a9d94a919.yaml
Normal file
@ -0,0 +1,16 @@
|
||||
---
|
||||
issues:
|
||||
- |
|
||||
Load Balancer Health Checks for Floating IPs are not populated with the
|
||||
protocol port. This way, when a backend is detected in ERROR state,
|
||||
requests to the Floating IP are still distributed to the ERROR'ed members.
|
||||
In order to fix the existing Load Balancer Health Checks it is required to
|
||||
recreate the entire Octavia Health Monitor, which will recreate the
|
||||
associated OVN Load Balancer Health Check(s).
|
||||
fixes:
|
||||
- |
|
||||
[`bug 1997418 <https://bugs.launchpad.net/neutron/+bug/1997418>`_]
|
||||
Added the protocol port to the Load Balancer Health Check associated with
|
||||
the Floating IP, additional fields have been introduced to the external_ids
|
||||
to provide more accurate information about the entities affected by any
|
||||
change over the Floating IP or LB VIP.
|
Loading…
x
Reference in New Issue
Block a user