From a69562e4932f42ed8be7f8c4715f742cb04927d4 Mon Sep 17 00:00:00 2001
From: Gregory Thiemonge
Date: Fri, 20 Nov 2020 08:22:35 +0100
Subject: [PATCH] Fix operating status for empty UDP pools

Fix the operating status of empty UDP pools: a UDP pool without members
went OFFLINE instead of ONLINE.

This commit changes the keepalived configuration with empty pools: the
configuration now contains a virtual_server and a comment about the
existing pool. This comment is used by get_udp_listener_pool_status to
detect that the pool exists and is not offline.

Story 2007984
Task 40610

Conflicts:
	octavia/tests/unit/common/sample_configs/sample_configs_combined.py

Change-Id: I30e23ca13d033d77c8ebdabbfdc7b54556a9466b
(cherry picked from commit 2954370e386d54a88763128e2dbfb9c4b2bca96d)
---
 .../backends/utils/keepalivedlvs_query.py     |  2 +-
 octavia/common/jinja/lvs/templates/macros.j2  | 12 +++--
 .../utils/test_keepalivedlvs_query.py         |  4 +-
 .../common/jinja/lvs/test_lvs_jinja_cfg.py    | 44 +++++++++++++++++++
 .../sample_configs/sample_configs_combined.py | 30 +++++++++----
 ...mpty-udp-pool-status-3171950628898468.yaml |  5 +++
 6 files changed, 79 insertions(+), 18 deletions(-)
 create mode 100644 releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml

diff --git a/octavia/amphorae/backends/utils/keepalivedlvs_query.py b/octavia/amphorae/backends/utils/keepalivedlvs_query.py
index 1a36f1b5ca..5237542f1a 100644
--- a/octavia/amphorae/backends/utils/keepalivedlvs_query.py
+++ b/octavia/amphorae/backends/utils/keepalivedlvs_query.py
@@ -221,7 +221,7 @@ def get_udp_listener_pool_status(listener_id):
     if 'Members' not in resource_ipport_mapping:
         return {'lvs': {
             'uuid': resource_ipport_mapping['Pool']['id'],
-            'status': constants.DOWN,
+            'status': constants.UP,
             'members': {}
         }}
 
diff --git a/octavia/common/jinja/lvs/templates/macros.j2 b/octavia/common/jinja/lvs/templates/macros.j2
index 2c719c3cd4..4035381548 100644
--- a/octavia/common/jinja/lvs/templates/macros.j2
+++ b/octavia/common/jinja/lvs/templates/macros.j2
@@ -65,13 +65,7 @@ MISC_CHECK {
 {% endmacro %}
 
 {% macro virtualserver_macro(constants, listener, lb_vip_address, default_pool) %}
-{% set need_render = [] %}
-{% if default_pool and default_pool.enabled and default_pool.members %}
-    {% for member in default_pool.members %}
-        {% do need_render.append(member.enabled) %}
-    {% endfor %}
-{% endif %}
-{% if need_render|length > 0 %}
+{% if default_pool %}
 virtual_server {{ lb_vip_address }} {{ listener.protocol_port }} {
     {{ lb_algo_macro(default_pool) }}
     lb_kind NAT
@@ -93,7 +87,11 @@ virtual_server {{ lb_vip_address }} {{ listener.protocol_port }} {
 {{ health_monitor_vs_macro(default_pool) }}
 
 {% if default_pool.protocol.lower() == "udp" %}
+    {% if default_pool.enabled %}
     # Configuration for Pool {{ default_pool.id }}
+    {% else %}
+    # Pool {{ default_pool.id }} is disabled
+    {% endif %}
     {% if default_pool.health_monitor and default_pool.health_monitor.enabled %}
     # Configuration for HealthMonitor {{ default_pool.health_monitor.id }}
     {% endif %}
diff --git a/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py b/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py
index 0ed767f644..106c2a942b 100644
--- a/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py
+++ b/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py
@@ -331,7 +331,7 @@ class LvsQueryTestCase(base.TestCase):
         # the returned resource_ipport_mapping doesn't contains the 'Members'
         # resources, that means the pool of listener doesn't have a enabled
         # pool resource, so the pool is not usable, then the pool status will
-        # return DOWN.
+        # return UP.
         mock_get_resource_ipports.return_value = (
             {
                 'Listener': {'id': self.listener_id_v4,
@@ -341,7 +341,7 @@ class LvsQueryTestCase(base.TestCase):
         res = lvs_query.get_udp_listener_pool_status(self.listener_id_v4)
         expected = {'lvs': {
             'uuid': self.pool_id_v4,
-            'status': constants.DOWN,
+            'status': constants.UP,
             'members': {}
         }}
         self.assertEqual(expected, res)
diff --git a/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py b/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py
index 83a0c3bd71..b57baee498 100644
--- a/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py
+++ b/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py
@@ -222,6 +222,50 @@ class TestLvsCfg(base.TestCase):
             persistence=False, alloc_default_pool=False))
         self.assertEqual(exp, rendered_obj)
 
+    def test_render_template_udp_with_pool_no_member(self):
+        exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
+               "# Configuration for Listener sample_listener_id_1\n\n"
+               "net_namespace amphora-haproxy\n\n"
+               "virtual_server 10.0.0.2 80 {\n"
+               "    lb_algo rr\n"
+               "    lb_kind NAT\n"
+               "    protocol UDP\n\n\n"
+               "    # Configuration for Pool sample_pool_id_0\n"
+               "}\n\n")
+
+        rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj(
+            sample_configs_combined.sample_listener_tuple(
+                proto=constants.PROTOCOL_UDP, monitor=False,
+                persistence=False, alloc_default_pool=True,
+                sample_default_pool=0))
+        self.assertEqual(exp, rendered_obj)
+
+    def test_render_template_udp_with_disabled_pool(self):
+        exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
+               "# Configuration for Listener sample_listener_id_1\n\n"
+               "net_namespace amphora-haproxy\n\n"
+               "virtual_server 10.0.0.2 80 {\n"
+               "    lb_algo rr\n"
+               "    lb_kind NAT\n"
+               "    protocol UDP\n\n\n"
+               "    # Pool sample_pool_id_1 is disabled\n"
+               "    # Configuration for Member sample_member_id_1\n"
+               "    real_server 10.0.0.99 82 {\n"
+               "        weight 13\n\n"
+               "    }\n\n"
+               "    # Configuration for Member sample_member_id_2\n"
+               "    real_server 10.0.0.98 82 {\n"
+               "        weight 13\n\n"
+               "    }\n\n"
+               "}\n\n")
+
+        rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj(
+            sample_configs_combined.sample_listener_tuple(
+                proto=constants.PROTOCOL_UDP, monitor=False,
+                persistence=False, alloc_default_pool=True,
+                pool_enabled=False))
+        self.assertEqual(exp, rendered_obj)
+
     def test_udp_transform_session_persistence(self):
         persistence_src_ip = (
             sample_configs_combined.sample_session_persistence_tuple(
diff --git a/octavia/tests/unit/common/sample_configs/sample_configs_combined.py b/octavia/tests/unit/common/sample_configs/sample_configs_combined.py
index 57f145141a..d59949f944 100644
--- a/octavia/tests/unit/common/sample_configs/sample_configs_combined.py
+++ b/octavia/tests/unit/common/sample_configs/sample_configs_combined.py
@@ -600,7 +600,9 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                           ssl_type_l7=False, pool_cert=False,
                           pool_ca_cert=False, pool_crl=False,
                           tls_enabled=False, hm_host_http_check=False,
-                          id='sample_listener_id_1', recursive_nest=False):
+                          id='sample_listener_id_1', recursive_nest=False,
+                          sample_default_pool=1,
+                          pool_enabled=True):
     proto = 'HTTP' if proto is None else proto
     if be_proto is None:
         be_proto = 'HTTP' if proto is 'TERMINATED_HTTPS' else proto
@@ -628,7 +630,8 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                 pool_cert=pool_cert, pool_ca_cert=pool_ca_cert,
                 pool_crl=pool_crl,
                 tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check,
-                listener_id='sample_listener_id_1'),
+                listener_id='sample_listener_id_1',
+                enabled=pool_enabled),
             sample_pool_tuple(
                 proto=be_proto, monitor=monitor, persistence=persistence,
                 persistence_type=persistence_type,
@@ -637,7 +640,8 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                 pool_cert=pool_cert, pool_ca_cert=pool_ca_cert,
                 pool_crl=pool_crl,
                 tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check,
-                listener_id='sample_listener_id_1')]
+                listener_id='sample_listener_id_1',
+                enabled=pool_enabled)]
         l7policies = [
             sample_l7policy_tuple('sample_l7policy_id_1', sample_policy=1),
             sample_l7policy_tuple('sample_l7policy_id_2', sample_policy=2),
@@ -660,7 +664,8 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                 pool_cert=pool_cert, pool_ca_cert=pool_ca_cert,
                 pool_crl=pool_crl,
                 tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check,
-                listener_id='sample_listener_id_1')]
+                listener_id='sample_listener_id_1',
+                enabled=pool_enabled)]
         l7policies = []
     listener = in_listener(
         id=id,
@@ -683,7 +688,9 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
            pool_ca_cert=pool_ca_cert,
            pool_crl=pool_crl,
            tls_enabled=tls_enabled,
-            hm_host_http_check=hm_host_http_check
+            hm_host_http_check=hm_host_http_check,
+            sample_pool=sample_default_pool,
+            enabled=pool_enabled
        ) if alloc_default_pool else '',
        connection_limit=connection_limit,
        tls_certificate_id='cont_id_1' if tls else '',
@@ -764,7 +771,8 @@ def sample_pool_tuple(listener_id=None, proto=None, monitor=True,
                       has_http_reuse=True, pool_cert=False, pool_ca_cert=False,
                       pool_crl=False, tls_enabled=False,
                       hm_host_http_check=False,
-                      provisioning_status=constants.ACTIVE):
+                      provisioning_status=constants.ACTIVE,
+                      enabled=True):
     proto = 'HTTP' if proto is None else proto
     monitor_proto = proto if monitor_proto is None else monitor_proto
     in_pool = collections.namedtuple(
@@ -783,7 +791,13 @@ def sample_pool_tuple(listener_id=None, proto=None, monitor=True,
                   'persistence_cookie': persistence_cookie}
         persis = sample_session_persistence_tuple(**kwargs)
     mon = None
-    if sample_pool == 1:
+    if sample_pool == 0:
+        id = 'sample_pool_id_0'
+        members = []
+        if monitor is True:
+            mon = sample_health_monitor_tuple(
+                proto=monitor_proto, host_http_check=hm_host_http_check)
+    elif sample_pool == 1:
         id = 'sample_pool_id_1'
         members = [sample_member_tuple('sample_member_id_1', '10.0.0.99',
                                        monitor_ip_port=monitor_ip_port),
@@ -809,7 +823,7 @@ def sample_pool_tuple(listener_id=None, proto=None, monitor=True,
         members=members,
         health_monitor=mon,
         session_persistence=persis if persistence is True else None,
-        enabled=True,
+        enabled=enabled,
         operating_status='ACTIVE', has_http_reuse=has_http_reuse,
         tls_certificate_id='pool_cont_1' if pool_cert else None,
         ca_tls_certificate_id='pool_ca_1' if pool_ca_cert else None,
diff --git a/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml b/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml
new file mode 100644
index 0000000000..5c668cbacd
--- /dev/null
+++ b/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Fix an incorrect ``operating_status`` with empty UDP pools. A UDP pool
+    without any member is now ``ONLINE`` instead of ``OFFLINE``.
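
For reference, below is a minimal Python sketch of the decision the patched
get_udp_listener_pool_status() makes for empty pools: a pool that is still
rendered in the keepalived configuration (via the "# Configuration for Pool"
comment) but has no members now reports UP, so its operating status becomes
ONLINE instead of OFFLINE. This is an illustration only, not the real Octavia
module: pool_status_sketch() is a hypothetical stand-in, the UP/DOWN strings
replace octavia.common.constants, and only the mapping shape and the
empty-pool return value are taken from the diff above.

# Minimal, self-contained sketch -- NOT the real Octavia code.
UP = 'UP'
DOWN = 'DOWN'  # pre-patch, an empty pool ended up reported with this value


def pool_status_sketch(resource_ipport_mapping):
    """Illustrative pool-status logic for a parsed keepalived LVS config."""
    if 'Members' not in resource_ipport_mapping:
        # The pool is present in the rendered config but has no members:
        # with this patch it is reported UP (empty pool == ONLINE), not DOWN.
        return {'lvs': {
            'uuid': resource_ipport_mapping['Pool']['id'],
            'status': UP,
            'members': {},
        }}
    # With members present, the real code inspects each real_server entry;
    # this sketch simply marks every member UP.
    return {'lvs': {
        'uuid': resource_ipport_mapping['Pool']['id'],
        'status': UP,
        'members': {m: UP for m in resource_ipport_mapping['Members']},
    }}


if __name__ == '__main__':
    # Example: an empty UDP pool, present in the config but without members.
    print(pool_status_sketch({'Listener': {'id': 'listener-1'},
                              'Pool': {'id': 'pool-1'}}))
    # -> {'lvs': {'uuid': 'pool-1', 'status': 'UP', 'members': {}}}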