Reset member operating status to NO_MONITOR when an HM is deleted

At present, when a health monitor (HM) is created for a pool,
the members of that pool are automatically set to the ONLINE
operating status, unless the HM detects an ERROR during
health checks.

This patch addresses an issue where, after deleting an HM,
the members should be reset to the NO_MONITOR operating status,
regardless of whether the HM had previously set them to ONLINE
or ERROR.

Closes-Bug: #2007985
Change-Id: I02bcba61d0cbc9202a6e50b849f8d781fb825d49
This commit is contained in:
Fernando Royo 2023-02-21 16:35:10 +01:00
parent ba4ea1134b
commit 569c9c011a
4 changed files with 91 additions and 33 deletions

View File

@ -532,7 +532,8 @@ class OvnProviderDriver(driver_base.ProviderDriver):
self._ovn_helper.add_request(request)
def health_monitor_delete(self, healthmonitor):
request_info = {'id': healthmonitor.healthmonitor_id}
request_info = {'id': healthmonitor.healthmonitor_id,
'pool_id': healthmonitor.pool_id}
request = {'type': ovn_const.REQ_TYPE_HM_DELETE,
'info': request_info}
self._ovn_helper.add_request(request)

View File

@ -1780,7 +1780,25 @@ class OvnProviderHelper():
str(member_id))
return constants.NO_MONITOR
def _update_member_status(self, ovn_lb, member, status=None, delete=False):
def _update_member_statuses(self, ovn_lb, pool_id, provisioning_status,
operating_status):
member_statuses = []
existing_members = ovn_lb.external_ids.get(
ovn_const.LB_EXT_IDS_POOL_PREFIX + str(pool_id))
if len(existing_members) > 0:
for mem_info in existing_members.split(','):
member_statuses.append({
constants.ID: mem_info.split('_')[1],
constants.PROVISIONING_STATUS: provisioning_status,
constants.OPERATING_STATUS: operating_status})
self._update_external_ids_member_status(
ovn_lb,
mem_info.split('_')[1],
operating_status)
return member_statuses
def _update_external_ids_member_status(self, ovn_lb, member, status=None,
delete=False):
existing_members = ovn_lb.external_ids.get(
ovn_const.OVN_MEMBER_STATUS_KEY)
try:
@ -1921,7 +1939,7 @@ class OvnProviderHelper():
operating_status = constants.ERROR
member_status[constants.OPERATING_STATUS] = operating_status
self._update_member_status(
self._update_external_ids_member_status(
ovn_lb,
member[constants.ID],
operating_status)
@ -1997,7 +2015,7 @@ class OvnProviderHelper():
{constants.ID: ovn_lb.name,
constants.PROVISIONING_STATUS: constants.ACTIVE}]}
self._update_member_status(
self._update_external_ids_member_status(
ovn_lb, member[constants.ID], None, delete=True)
listener_status = []
@ -2074,7 +2092,7 @@ class OvnProviderHelper():
constants.PROVISIONING_STATUS: constants.ACTIVE}]}
if constants.OPERATING_STATUS in member_status:
self._update_member_status(
self._update_external_ids_member_status(
ovn_lb,
member[constants.ID],
member_status[constants.OPERATING_STATUS])
@ -2621,21 +2639,9 @@ class OvnProviderHelper():
constants.OPERATING_STATUS: constants.ONLINE})
# Update status for all members in the pool
member_status = []
existing_members = ovn_lb.external_ids[pool_key]
if len(existing_members) > 0:
for mem_info in existing_members.split(','):
member_status.append({
constants.ID: mem_info.split('_')[1],
constants.PROVISIONING_STATUS: constants.ACTIVE,
constants.OPERATING_STATUS: constants.ONLINE})
# NOTE (froyo): to sync local info with the HM initial one for
# previously created members, in case HM detects any change we
# will be event triggered about the change one.
self._update_member_status(
ovn_lb,
mem_info.split('_')[1],
constants.ONLINE)
member_status = self._update_member_statuses(ovn_lb, pool_id,
constants.ACTIVE,
constants.ONLINE)
status[constants.MEMBERS] = member_status
# MONITOR_PRT = 80
@ -2710,6 +2716,8 @@ class OvnProviderHelper():
def hm_delete(self, info):
hm_id = info[constants.ID]
pool_id_related = info[constants.POOL_ID]
status = {
constants.HEALTHMONITORS: [
{constants.ID: hm_id,
@ -2745,6 +2753,12 @@ class OvnProviderHelper():
# ovn-nbctl clear load_balancer ${OVN_LB_ID} health_check
# TODO(haleyb) remove just the ip_port_mappings for this hm
hms_key = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_HMS_KEY, [])
# Update status for members in the pool related to HM
member_status = self._update_member_statuses(ovn_lb, pool_id_related,
constants.ACTIVE,
constants.NO_MONITOR)
if hms_key:
hms_key = jsonutils.loads(hms_key)
if hm_id in hms_key:
@ -2785,6 +2799,9 @@ class OvnProviderHelper():
{constants.ID: pool_id,
constants.PROVISIONING_STATUS: constants.ACTIVE}]
if member_status:
status[constants.MEMBERS] = member_status
status[constants.LISTENERS] = []
for listener in pool_listeners:
status[constants.LISTENERS].append(
@ -2999,7 +3016,8 @@ class OvnProviderHelper():
if info['status'] == ovn_const.HM_EVENT_MEMBER_PORT_OFFLINE:
member_status = constants.ERROR
self._update_member_status(ovn_lb, member_id, member_status)
self._update_external_ids_member_status(ovn_lb, member_id,
member_status)
statuses.append(self._get_current_operating_statuses(ovn_lb))
if not statuses:

View File

@ -1074,7 +1074,8 @@ class TestOvnProviderDriver(ovn_base.TestOvnOctaviaBase):
self.mock_add_request.assert_called_once_with(expected_dict)
def test_health_monitor_delete(self):
info = {'id': self.ref_health_monitor.healthmonitor_id}
info = {'id': self.ref_health_monitor.healthmonitor_id,
'pool_id': self.ref_health_monitor.pool_id}
expected_dict = {'type': ovn_const.REQ_TYPE_HM_DELETE,
'info': info}
self.driver.health_monitor_delete(self.ref_health_monitor)

View File

@ -224,8 +224,8 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
(self.helper.ovn_nbdb_api.ls_get.return_value.
execute.return_value) = self.network
def test__update_member_status(self):
self.helper._update_member_status(
def test__update_external_ids_member_status(self):
self.helper._update_external_ids_member_status(
self.ovn_lb, self.member_id, constants.NO_MONITOR)
member_status = {
ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}'
@ -233,15 +233,15 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
'Load_Balancer', self.ovn_lb.uuid, ('external_ids', member_status))
def test__update_member_status_delete(self):
self.helper._update_member_status(
def test__update_external_ids_member_status_delete(self):
self.helper._update_external_ids_member_status(
self.ovn_lb, self.member_id, None, True)
self.helper.ovn_nbdb_api.db_remove.assert_called_once_with(
'Load_Balancer', self.ovn_lb.uuid, 'external_ids',
ovn_const.OVN_MEMBER_STATUS_KEY)
def test__update_member_status_delete_not_found(self):
self.helper._update_member_status(
def test__update_external_ids_member_status_delete_not_found(self):
self.helper._update_external_ids_member_status(
self.ovn_lb, 'fool', None, True)
member_status = {
ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}'
@ -3929,6 +3929,40 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.helper.ovn_nbdb_api.db_destroy.assert_has_calls(
expected_destroy_calls)
@mock.patch.object(ovn_helper.OvnProviderHelper, '_clean_up_hm_port')
def test_hm_delete_without_members_in_pool(self, del_hm_port):
self._get_pool_listeners.stop()
pool_key = 'pool_%s' % self.pool_id
self.ovn_hm_lb.external_ids[pool_key] = ''
self.helper.ovn_nbdb_api.db_list_rows.return_value.\
execute.side_effect = [[self.ovn_hm_lb], [self.ovn_hm]]
status = self.helper.hm_delete(self.health_monitor)
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
constants.DELETED)
self.assertEqual(status['healthmonitors'][0]['operating_status'],
constants.NO_MONITOR)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['listeners'][0]['provisioning_status'],
constants.ACTIVE)
expected_clear_calls = [
mock.call('Load_Balancer', self.ovn_hm_lb.uuid,
'ip_port_mappings')]
expected_remove_calls = [
mock.call('Load_Balancer', self.ovn_hm_lb.uuid, 'health_check',
self.ovn_hm.uuid)]
expected_destroy_calls = [
mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid)]
del_hm_port.assert_not_called()
self.helper.ovn_nbdb_api.db_clear.assert_has_calls(
expected_clear_calls)
self.helper.ovn_nbdb_api.db_remove.assert_has_calls(
expected_remove_calls)
self.helper.ovn_nbdb_api.db_destroy.assert_has_calls(
expected_destroy_calls)
def test_hm_delete_row_not_found(self):
self.helper.ovn_nbdb_api.db_list_rows.return_value.\
execute.return_value = [self.ovn_hm]
@ -4092,11 +4126,12 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
'port': port,
'protocol': ovn_lbs[0].protocol,
'status': [member_status]}
self._update_member_status(self.ovn_hm_lb, member_id, member_status)
self._update_external_ids_member_status(self.ovn_hm_lb, member_id,
member_status)
status = self.helper.hm_update_event(info)
return status
def _update_member_status(self, lb, member_id, member_status):
def _update_external_ids_member_status(self, lb, member_id, member_status):
status = constants.ONLINE
if member_status == 'offline':
status = constants.ERROR
@ -4346,8 +4381,10 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
'port': '8080',
'protocol': self.ovn_hm_lb.protocol,
'status': ovn_const.HM_EVENT_MEMBER_PORT_OFFLINE}
self._update_member_status(self.ovn_hm_lb, member['id'], 'offline')
self._update_member_status(ovn_hm_lb_2, member_2['id'], 'offline')
self._update_external_ids_member_status(self.ovn_hm_lb, member['id'],
'offline')
self._update_external_ids_member_status(ovn_hm_lb_2, member_2['id'],
'offline')
status = self.helper.hm_update_event(info)
self.assertEqual(status['pools'][0]['provisioning_status'],
@ -4457,7 +4494,8 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
# member lb2 OFFLINE, so lb2 operating_status should be ERROR
# for Pool and Loadbalancer, but lb1 should keep ONLINE
self._update_member_status(ovn_hm_lb2, member_lb2['id'], 'offline')
self._update_external_ids_member_status(ovn_hm_lb2, member_lb2['id'],
'offline')
info = {
'ovn_lbs': [self.ovn_hm_lb, ovn_hm_lb2],