From 89cc378b7c7c292a8e1d58b088d6c44f7a9775c9 Mon Sep 17 00:00:00 2001 From: Gregory Thiemonge Date: Fri, 20 Nov 2020 08:22:35 +0100 Subject: [PATCH] Fix operating status for empty UDP pools Fix empty UDP pools status: a UDP pool without members went OFFLINE instead of ONLINE. This commit changes the keepalived configuration with empty pools: the configuration now contains a virtual_server and a comment about the existing pool. This comment is used by the get_udp_listener_pool_status to detect that the pool exists and is not offline. Story 2007984 Task 40610 Conflicts: octavia/tests/unit/common/sample_configs/sample_configs_combined.py Change-Id: I30e23ca13d033d77c8ebdabbfdc7b54556a9466b (cherry picked from commit 2954370e386d54a88763128e2dbfb9c4b2bca96d) --- .../backends/utils/keepalivedlvs_query.py | 2 +- octavia/common/jinja/lvs/templates/macros.j2 | 12 +++-- .../utils/test_keepalivedlvs_query.py | 4 +- .../common/jinja/lvs/test_lvs_jinja_cfg.py | 44 +++++++++++++++++++ .../sample_configs/sample_configs_combined.py | 32 ++++++++++---- ...mpty-udp-pool-status-3171950628898468.yaml | 5 +++ 6 files changed, 81 insertions(+), 18 deletions(-) create mode 100644 releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml diff --git a/octavia/amphorae/backends/utils/keepalivedlvs_query.py b/octavia/amphorae/backends/utils/keepalivedlvs_query.py index 390bb4eb93..92efedaf2b 100644 --- a/octavia/amphorae/backends/utils/keepalivedlvs_query.py +++ b/octavia/amphorae/backends/utils/keepalivedlvs_query.py @@ -220,7 +220,7 @@ def get_udp_listener_pool_status(listener_id): if 'Members' not in resource_ipport_mapping: return {'lvs': { 'uuid': resource_ipport_mapping['Pool']['id'], - 'status': constants.DOWN, + 'status': constants.UP, 'members': {} }} diff --git a/octavia/common/jinja/lvs/templates/macros.j2 b/octavia/common/jinja/lvs/templates/macros.j2 index 73d5a5cb7d..c8087dbdf4 100644 --- a/octavia/common/jinja/lvs/templates/macros.j2 +++ b/octavia/common/jinja/lvs/templates/macros.j2 @@ -93,13 +93,7 @@ TCP_CHECK { {% endmacro %} {% macro virtualserver_macro(constants, listener, lb_vip_address, default_pool) %} -{% set need_render = [] %} -{% if default_pool and default_pool.enabled and default_pool.members %} - {% for member in default_pool.members %} - {% do need_render.append(member.enabled) %} - {% endfor %} -{% endif %} -{% if need_render|length > 0 %} +{% if default_pool %} virtual_server {{ lb_vip_address }} {{ listener.protocol_port }} { {{ lb_algo_macro(default_pool) }} lb_kind NAT @@ -121,7 +115,11 @@ virtual_server {{ lb_vip_address }} {{ listener.protocol_port }} { {{ health_monitor_vs_macro(default_pool) }} {% if default_pool.protocol.lower() == "udp" %} + {% if default_pool.enabled %} # Configuration for Pool {{ default_pool.id }} + {% else %} + # Pool {{ default_pool.id }} is disabled + {% endif %} {% if default_pool.health_monitor and default_pool.health_monitor.enabled %} # Configuration for HealthMonitor {{ default_pool.health_monitor.id }} {% endif %} diff --git a/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py b/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py index 54a060ce0a..e4e307fabf 100644 --- a/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py +++ b/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py @@ -331,7 +331,7 @@ class LvsQueryTestCase(base.TestCase): # the returned resource_ipport_mapping doesn't contains the 'Members' # resources, that means the pool of listener doesn't 
have a enabled # pool resource, so the pool is not usable, then the pool status will - # return DOWN. + # return UP. mock_get_resource_ipports.return_value = ( { 'Listener': {'id': self.listener_id_v4, @@ -341,7 +341,7 @@ class LvsQueryTestCase(base.TestCase): res = lvs_query.get_udp_listener_pool_status(self.listener_id_v4) expected = {'lvs': { 'uuid': self.pool_id_v4, - 'status': constants.DOWN, + 'status': constants.UP, 'members': {} }} self.assertEqual(expected, res) diff --git a/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py b/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py index 200777ba11..aec53b6c32 100644 --- a/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py +++ b/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py @@ -222,6 +222,50 @@ class TestLvsCfg(base.TestCase): persistence=False, alloc_default_pool=False)) self.assertEqual(exp, rendered_obj) + def test_render_template_udp_with_pool_no_member(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server 10.0.0.2 80 {\n" + " lb_algo rr\n" + " lb_kind NAT\n" + " protocol UDP\n\n\n" + " # Configuration for Pool sample_pool_id_0\n" + "}\n\n") + + rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, monitor=False, + persistence=False, alloc_default_pool=True, + sample_default_pool=0)) + self.assertEqual(exp, rendered_obj) + + def test_render_template_udp_with_disabled_pool(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server 10.0.0.2 80 {\n" + " lb_algo rr\n" + " lb_kind NAT\n" + " protocol UDP\n\n\n" + " # Pool sample_pool_id_1 is disabled\n" + " # Configuration for Member sample_member_id_1\n" + " real_server 10.0.0.99 82 {\n" + " weight 13\n\n" + " }\n\n" + " # Configuration for Member sample_member_id_2\n" + " real_server 10.0.0.98 82 {\n" + " weight 13\n\n" + " }\n\n" + "}\n\n") + + rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, monitor=False, + persistence=False, alloc_default_pool=True, + pool_enabled=False)) + self.assertEqual(exp, rendered_obj) + def test_udp_transform_session_persistence(self): persistence_src_ip = ( sample_configs_combined.sample_session_persistence_tuple( diff --git a/octavia/tests/unit/common/sample_configs/sample_configs_combined.py b/octavia/tests/unit/common/sample_configs/sample_configs_combined.py index 844d205ba3..9e6fc491d0 100644 --- a/octavia/tests/unit/common/sample_configs/sample_configs_combined.py +++ b/octavia/tests/unit/common/sample_configs/sample_configs_combined.py @@ -602,7 +602,9 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True, id='sample_listener_id_1', recursive_nest=False, provisioning_status=constants.ACTIVE, tls_ciphers=constants.CIPHERS_OWASP_SUITE_B, - backend_tls_ciphers=None): + backend_tls_ciphers=None, + sample_default_pool=1, + pool_enabled=True): proto = 'HTTP' if proto is None else proto if be_proto is None: be_proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto @@ -634,7 +636,8 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True, pool_crl=pool_crl, tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check, 
listener_id='sample_listener_id_1', - tls_ciphers=backend_tls_ciphers), + tls_ciphers=backend_tls_ciphers, + enabled=pool_enabled), sample_pool_tuple( proto=be_proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type, @@ -644,7 +647,8 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True, pool_crl=pool_crl, tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check, listener_id='sample_listener_id_1', - tls_ciphers=backend_tls_ciphers)] + tls_ciphers=backend_tls_ciphers, + enabled=pool_enabled)] l7policies = [ sample_l7policy_tuple('sample_l7policy_id_1', sample_policy=1), sample_l7policy_tuple('sample_l7policy_id_2', sample_policy=2), @@ -668,7 +672,8 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True, pool_crl=pool_crl, tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check, listener_id='sample_listener_id_1', - tls_ciphers=backend_tls_ciphers)] + tls_ciphers=backend_tls_ciphers, + enabled=pool_enabled)] l7policies = [] listener = in_listener( id=id, @@ -692,7 +697,9 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True, pool_ca_cert=pool_ca_cert, pool_crl=pool_crl, tls_enabled=tls_enabled, - hm_host_http_check=hm_host_http_check + hm_host_http_check=hm_host_http_check, + sample_pool=sample_default_pool, + enabled=pool_enabled ) if alloc_default_pool else '', connection_limit=connection_limit, tls_certificate_id='cont_id_1' if tls else '', @@ -777,7 +784,8 @@ def sample_pool_tuple(listener_id=None, proto=None, monitor=True, pool_crl=False, tls_enabled=False, hm_host_http_check=False, provisioning_status=constants.ACTIVE, - tls_ciphers=constants.CIPHERS_OWASP_SUITE_B): + tls_ciphers=constants.CIPHERS_OWASP_SUITE_B, + enabled=True): proto = 'HTTP' if proto is None else proto if not tls_enabled: tls_ciphers = None @@ -799,7 +807,15 @@ def sample_pool_tuple(listener_id=None, proto=None, monitor=True, 'persistence_cookie': persistence_cookie} persis = sample_session_persistence_tuple(**kwargs) mon = None - if sample_pool == 1: + if sample_pool == 0: + id = 'sample_pool_id_0' + members = [] + if monitor is True: + mon = sample_health_monitor_tuple( + proto=monitor_proto, + host_http_check=hm_host_http_check, + expected_codes=monitor_expected_codes) + elif sample_pool == 1: id = 'sample_pool_id_1' members = [sample_member_tuple('sample_member_id_1', '10.0.0.99', monitor_ip_port=monitor_ip_port), @@ -828,7 +844,7 @@ def sample_pool_tuple(listener_id=None, proto=None, monitor=True, members=members, health_monitor=mon, session_persistence=persis if persistence is True else None, - enabled=True, + enabled=enabled, operating_status='ACTIVE', has_http_reuse=has_http_reuse, tls_certificate_id='pool_cont_1' if pool_cert else None, ca_tls_certificate_id='pool_ca_1' if pool_ca_cert else None, diff --git a/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml b/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml new file mode 100644 index 0000000000..5c668cbacd --- /dev/null +++ b/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix an incorrect ``operating_status`` with empty UDP pools. A UDP pool + without any member is now ``ONLINE`` instead of ``OFFLINE``.
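
For illustration, a minimal sketch of the status decision this patch changes, assuming a resource mapping shaped like the one in the unit test above. The helper name pool_status_from_mapping and the plain UP/DOWN strings are stand-ins, not the actual Octavia API; the real logic lives in octavia/amphorae/backends/utils/keepalivedlvs_query.py and uses octavia.common.constants.

    # Minimal sketch (assumed names, not the actual Octavia implementation)
    # of the decision changed by this patch: an empty-but-existing UDP pool
    # is now reported UP instead of DOWN.

    UP = 'UP'      # stand-in for octavia.common.constants.UP
    DOWN = 'DOWN'  # stand-in for octavia.common.constants.DOWN


    def pool_status_from_mapping(resource_ipport_mapping):
        """Build the LVS pool status dict from a parsed keepalived config.

        The mapping is assumed to follow the shape used in the unit test
        above: a 'Pool' entry is present once the pool exists in the
        rendered config, and a 'Members' entry only appears when
        real_server blocks were rendered for it.
        """
        if 'Members' not in resource_ipport_mapping:
            # The pool exists (the virtual_server and its
            # "# Configuration for Pool ..." comment are now always
            # rendered), but it has no members, so report it UP with an
            # empty member map instead of DOWN.
            return {'lvs': {
                'uuid': resource_ipport_mapping['Pool']['id'],
                'status': UP,
                'members': {},
            }}
        # With members present, the real code derives per-member statuses
        # from the keepalived/ipvsadm state; omitted in this sketch.
        raise NotImplementedError("member status handling is not sketched")


    # Hypothetical usage with illustrative IDs:
    print(pool_status_from_mapping({
        'Listener': {'id': 'sample_listener_id_1'},
        'Pool': {'id': 'sample_pool_id_1'},
    }))
    # -> {'lvs': {'uuid': 'sample_pool_id_1', 'status': 'UP', 'members': {}}}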