From 01309d067c800c3bb07978c1db7e62a1c8b49a73 Mon Sep 17 00:00:00 2001
From: Fernando Royo
Date: Tue, 18 Apr 2023 13:12:53 +0200
Subject: [PATCH] Apply admin_state_up on a new member creation

If a new member is created with admin_state_up set to False, it should
not participate in the load balancing of requests over the LB VIP.
However, the member still received requests, even though the Octavia
API reported the member's operating_status correctly.

This patch fixes the issue by not adding the member to the vips column
(in the OVN NB database), so that requests over the LB VIP no longer
take that member into account.

Closes-Bug: 2016862
Change-Id: Iec7f6b1da8548a29eb9cc0e2544e77e1a6c6fb1e
---
 ovn_octavia_provider/helper.py                | 21 ++++++++++++-------
 ovn_octavia_provider/tests/functional/base.py |  3 ++-
 .../tests/unit/test_helper.py                 | 15 +++++++++++--
 3 files changed, 28 insertions(+), 11 deletions(-)

diff --git a/ovn_octavia_provider/helper.py b/ovn_octavia_provider/helper.py
index f0ba8d62..639c59dc 100644
--- a/ovn_octavia_provider/helper.py
+++ b/ovn_octavia_provider/helper.py
@@ -1872,7 +1872,11 @@ class OvnProviderHelper():
                     ('external_ids', pool_data)))
             external_ids[pool_key] = pool_data[pool_key]
 
-        commands.extend(self._refresh_lb_vips(ovn_lb, external_ids))
+
+        # NOTE(froyo): Add the member to the vips if it is enabled
+        if member.get(constants.ADMIN_STATE_UP, False):
+            commands.extend(self._refresh_lb_vips(ovn_lb, external_ids))
+
         # Note (froyo): commands are now splitted to separate atomic process,
         # leaving outside the not mandatory ones to allow add_member
         # finish correctly
@@ -1922,13 +1926,8 @@
         pool = {constants.ID: member[constants.POOL_ID],
                 constants.PROVISIONING_STATUS: constants.ACTIVE,
                 constants.OPERATING_STATUS: constants.ONLINE}
-        member_status = {constants.ID: member[constants.ID],
-                         constants.PROVISIONING_STATUS: constants.ACTIVE}
-        if not member[constants.ADMIN_STATE_UP]:
-            member_status[constants.OPERATING_STATUS] = constants.OFFLINE
         status = {
             constants.POOLS: [pool],
-            constants.MEMBERS: [member_status],
             constants.LOADBALANCERS: [
                 {constants.ID: ovn_lb.name,
                  constants.PROVISIONING_STATUS: constants.ACTIVE}]}
@@ -1953,13 +1952,19 @@
             status[constants.LISTENERS] = listener_status
 
         operating_status = constants.NO_MONITOR
-        if new_member and ovn_lb.health_check:
+        if not member[constants.ADMIN_STATE_UP]:
+            operating_status = constants.OFFLINE
+        elif (new_member and operating_status == constants.NO_MONITOR and
+                ovn_lb.health_check):
             operating_status = constants.ONLINE
             mb_ip, mb_port, mb_subnet, mb_id = self._extract_member_info(
                 new_member)[0]
             if not self._update_hm_member(ovn_lb, pool_key, mb_ip):
                 operating_status = constants.ERROR
-        member_status[constants.OPERATING_STATUS] = operating_status
+        member_status = {constants.ID: member[constants.ID],
+                         constants.PROVISIONING_STATUS: constants.ACTIVE,
+                         constants.OPERATING_STATUS: operating_status}
+        status[constants.MEMBERS] = [member_status]
 
         self._update_external_ids_member_status(
             ovn_lb,
diff --git a/ovn_octavia_provider/tests/functional/base.py b/ovn_octavia_provider/tests/functional/base.py
index 4db2776b..5ba88768 100644
--- a/ovn_octavia_provider/tests/functional/base.py
+++ b/ovn_octavia_provider/tests/functional/base.py
@@ -867,7 +867,8 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
         expected_status = {
             'pools': [pool_status],
             'members': [{"id": m_member.member_id,
-                         "provisioning_status": "ACTIVE"}],
+                         "provisioning_status": "ACTIVE",
+                         "operating_status": o_constants.NO_MONITOR}],
             'loadbalancers': [{'id': pool.loadbalancer_id,
                                'provisioning_status': 'ACTIVE'}],
             'listeners': expected_listener_status
diff --git a/ovn_octavia_provider/tests/unit/test_helper.py b/ovn_octavia_provider/tests/unit/test_helper.py
index 228a229f..31047da8 100644
--- a/ovn_octavia_provider/tests/unit/test_helper.py
+++ b/ovn_octavia_provider/tests/unit/test_helper.py
@@ -1681,8 +1681,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
 
     @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
     def test_member_create(self, net_cli):
-        net_cli.return_value.show_subnet.side_effect = [
-            idlutils.RowNotFound, idlutils.RowNotFound]
+        net_cli.return_value.show_subnet.side_effect = [idlutils.RowNotFound]
         self.ovn_lb.external_ids = mock.MagicMock()
         status = self.helper.member_create(self.member)
         self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
@@ -1691,6 +1690,17 @@
                          constants.ACTIVE)
         self.assertEqual(status['members'][0]['provisioning_status'],
                          constants.ACTIVE)
+        self.assertEqual(status['members'][0]['operating_status'],
+                         constants.NO_MONITOR)
+        calls = [
+            mock.call.db_clear('Load_Balancer', self.ovn_lb.uuid, 'vips'),
+            mock.call.db_set('Load_Balancer', self.ovn_lb.uuid, ('vips', {}))]
+        self.helper.ovn_nbdb_api.assert_has_calls(calls)
+
+    @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
+    def test_member_create_disabled(self, net_cli):
+        net_cli.return_value.show_subnet.side_effect = [idlutils.RowNotFound]
+        self.ovn_lb.external_ids = mock.MagicMock()
         self.member['admin_state_up'] = False
         status = self.helper.member_create(self.member)
         self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
@@ -1701,6 +1711,7 @@
                          constants.ACTIVE)
         self.assertEqual(status['members'][0]['operating_status'],
                          constants.OFFLINE)
+        self.helper.ovn_nbdb_api.db_clear.assert_not_called()
 
     @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id')
     @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_lr_of_ls')
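
The gist of the change, as a standalone sketch: only an enabled member is
wired into the VIP mapping, and an admin-down member is reported OFFLINE.
The names below (a plain `vips` dict, a free `member_create` function) are
simplified stand-ins for `OvnProviderHelper.member_create` and the OVN NB
`Load_Balancer.vips` column, not the provider's real API.

    # Illustrative sketch only -- the real code emits OVN NB commands via
    # _refresh_lb_vips(); here a plain dict models the vips column.
    NO_MONITOR = 'NO_MONITOR'
    OFFLINE = 'OFFLINE'
    ONLINE = 'ONLINE'

    def member_create(lb_vips, member, has_health_monitor=False):
        """Add a member; only enabled members go behind the VIP."""
        # Mirrors the patch: an admin-down member is still recorded in the
        # pool's external_ids (not modelled here) but is *not* added to
        # vips, so OVN never balances VIP traffic to it.
        if member.get('admin_state_up', False):
            backends = lb_vips.setdefault(member['vip'], [])
            backends.append('%s:%s' % (member['address'], member['port']))

        # Status ordering also mirrors the patch: admin-down wins, then a
        # monitored new member starts ONLINE, otherwise NO_MONITOR.
        if not member.get('admin_state_up', False):
            return OFFLINE
        elif has_health_monitor:
            return ONLINE
        return NO_MONITOR

    if __name__ == '__main__':
        vips = {}
        up = {'address': '10.0.0.5', 'port': 8080,
              'vip': '172.24.4.10:80', 'admin_state_up': True}
        down = {'address': '10.0.0.6', 'port': 8080,
                'vip': '172.24.4.10:80', 'admin_state_up': False}
        assert member_create(vips, up) == NO_MONITOR
        assert member_create(vips, down) == OFFLINE
        # Only the enabled member ends up behind the VIP.
        assert vips == {'172.24.4.10:80': ['10.0.0.5:8080']}

Keeping the disabled member out of vips, rather than relying on status
reporting alone, is what actually stops traffic: ovn-northd programs load
balancing purely from that column.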