diff --git a/octavia/amphorae/backends/utils/keepalivedlvs_query.py b/octavia/amphorae/backends/utils/keepalivedlvs_query.py
index fb02e15b7f..34cc1240c2 100644
--- a/octavia/amphorae/backends/utils/keepalivedlvs_query.py
+++ b/octavia/amphorae/backends/utils/keepalivedlvs_query.py
@@ -233,7 +233,7 @@ def get_udp_listener_pool_status(listener_id):
     if 'Members' not in resource_ipport_mapping:
         return {'lvs': {
             'uuid': resource_ipport_mapping['Pool']['id'],
-            'status': constants.DOWN,
+            'status': constants.UP,
             'members': {}
         }}

diff --git a/octavia/common/jinja/lvs/templates/macros.j2 b/octavia/common/jinja/lvs/templates/macros.j2
index 73d5a5cb7d..c8087dbdf4 100644
--- a/octavia/common/jinja/lvs/templates/macros.j2
+++ b/octavia/common/jinja/lvs/templates/macros.j2
@@ -93,13 +93,7 @@ TCP_CHECK {
 {% endmacro %}

 {% macro virtualserver_macro(constants, listener, lb_vip_address, default_pool) %}
-{% set need_render = [] %}
-{% if default_pool and default_pool.enabled and default_pool.members %}
-    {% for member in default_pool.members %}
-        {% do need_render.append(member.enabled) %}
-    {% endfor %}
-{% endif %}
-{% if need_render|length > 0 %}
+{% if default_pool %}
 virtual_server {{ lb_vip_address }} {{ listener.protocol_port }} {
     {{ lb_algo_macro(default_pool) }}
     lb_kind NAT
@@ -121,7 +115,11 @@ virtual_server {{ lb_vip_address }} {{ listener.protocol_port }} {
     {{ health_monitor_vs_macro(default_pool) }}

 {% if default_pool.protocol.lower() == "udp" %}
+{% if default_pool.enabled %}
     # Configuration for Pool {{ default_pool.id }}
+{% else %}
+    # Pool {{ default_pool.id }} is disabled
+{% endif %}
 {% if default_pool.health_monitor and default_pool.health_monitor.enabled %}
     # Configuration for HealthMonitor {{ default_pool.health_monitor.id }}
 {% endif %}
diff --git a/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py b/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py
index dfb3050306..4d66846052 100644
--- a/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py
+++ b/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py
@@ -385,7 +385,7 @@ class LvsQueryTestCase(base.TestCase):
         # the returned resource_ipport_mapping doesn't contains the 'Members'
         # resources, that means the pool of listener doesn't have a enabled
         # pool resource, so the pool is not usable, then the pool status will
-        # return DOWN.
+        # return UP.
         mock_get_resource_ipports.return_value = (
             {
                 'Listener': {'id': self.listener_id_v4,
@@ -395,7 +395,7 @@
         res = lvs_query.get_udp_listener_pool_status(self.listener_id_v4)
         expected = {'lvs': {
             'uuid': self.pool_id_v4,
-            'status': constants.DOWN,
+            'status': constants.UP,
             'members': {}
         }}
         self.assertEqual(expected, res)
diff --git a/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py b/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py
index 5f9e23c900..c6207beba4 100644
--- a/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py
+++ b/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py
@@ -222,6 +222,50 @@ class TestLvsCfg(base.TestCase):
                 persistence=False, alloc_default_pool=False))
         self.assertEqual(exp, rendered_obj)

+    def test_render_template_udp_with_pool_no_member(self):
+        exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
+               "# Configuration for Listener sample_listener_id_1\n\n"
+               "net_namespace amphora-haproxy\n\n"
+               "virtual_server 10.0.0.2 80 {\n"
+               "    lb_algo rr\n"
+               "    lb_kind NAT\n"
+               "    protocol UDP\n\n\n"
+               "    # Configuration for Pool sample_pool_id_0\n"
+               "}\n\n")
+
+        rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj(
+            sample_configs_combined.sample_listener_tuple(
+                proto=constants.PROTOCOL_UDP, monitor=False,
+                persistence=False, alloc_default_pool=True,
+                sample_default_pool=0))
+        self.assertEqual(exp, rendered_obj)
+
+    def test_render_template_udp_with_disabled_pool(self):
+        exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
+               "# Configuration for Listener sample_listener_id_1\n\n"
+               "net_namespace amphora-haproxy\n\n"
+               "virtual_server 10.0.0.2 80 {\n"
+               "    lb_algo rr\n"
+               "    lb_kind NAT\n"
+               "    protocol UDP\n\n\n"
+               "    # Pool sample_pool_id_1 is disabled\n"
+               "    # Configuration for Member sample_member_id_1\n"
+               "    real_server 10.0.0.99 82 {\n"
+               "        weight 13\n\n"
+               "    }\n\n"
+               "    # Configuration for Member sample_member_id_2\n"
+               "    real_server 10.0.0.98 82 {\n"
+               "        weight 13\n\n"
+               "    }\n\n"
+               "}\n\n")
+
+        rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj(
+            sample_configs_combined.sample_listener_tuple(
+                proto=constants.PROTOCOL_UDP, monitor=False,
+                persistence=False, alloc_default_pool=True,
+                pool_enabled=False))
+        self.assertEqual(exp, rendered_obj)
+
     def test_udp_transform_session_persistence(self):
         persistence_src_ip = (
             sample_configs_combined.sample_session_persistence_tuple(
diff --git a/octavia/tests/unit/common/sample_configs/sample_configs_combined.py b/octavia/tests/unit/common/sample_configs/sample_configs_combined.py
index ca0c111f78..aed4f733dc 100644
--- a/octavia/tests/unit/common/sample_configs/sample_configs_combined.py
+++ b/octavia/tests/unit/common/sample_configs/sample_configs_combined.py
@@ -609,7 +609,9 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                           backend_tls_versions=constants.
                           TLS_VERSIONS_OWASP_SUITE_B,
                           alpn_protocols=constants.
-                          AMPHORA_SUPPORTED_ALPN_PROTOCOLS):
+                          AMPHORA_SUPPORTED_ALPN_PROTOCOLS,
+                          sample_default_pool=1,
+                          pool_enabled=True):
     proto = 'HTTP' if proto is None else proto
     if be_proto is None:
         be_proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto
@@ -646,7 +648,8 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                 hm_host_http_check=hm_host_http_check,
                 listener_id='sample_listener_id_1',
                 tls_ciphers=backend_tls_ciphers,
-                tls_versions=backend_tls_versions),
+                tls_versions=backend_tls_versions,
+                enabled=pool_enabled),
             sample_pool_tuple(
                 proto=be_proto, monitor=monitor, persistence=persistence,
                 persistence_type=persistence_type,
@@ -657,7 +660,8 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                 hm_host_http_check=hm_host_http_check,
                 listener_id='sample_listener_id_1',
                 tls_ciphers=backend_tls_ciphers,
-                tls_versions=None)]
+                tls_versions=None,
+                enabled=pool_enabled)]
         l7policies = [
             sample_l7policy_tuple('sample_l7policy_id_1', sample_policy=1),
             sample_l7policy_tuple('sample_l7policy_id_2', sample_policy=2),
@@ -682,7 +686,8 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                 hm_host_http_check=hm_host_http_check,
                 listener_id='sample_listener_id_1',
                 tls_ciphers=backend_tls_ciphers,
-                tls_versions=backend_tls_versions)]
+                tls_versions=backend_tls_versions,
+                enabled=pool_enabled)]
         l7policies = []
     listener = in_listener(
         id=id,
@@ -706,7 +711,9 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
             pool_ca_cert=pool_ca_cert,
             pool_crl=pool_crl,
             tls_enabled=tls_enabled,
-            hm_host_http_check=hm_host_http_check
+            hm_host_http_check=hm_host_http_check,
+            sample_pool=sample_default_pool,
+            enabled=pool_enabled
         ) if alloc_default_pool else '',
         connection_limit=connection_limit,
         tls_certificate_id='cont_id_1' if tls else '',
@@ -795,7 +802,8 @@ def sample_pool_tuple(listener_id=None, proto=None, monitor=True,
                       provisioning_status=constants.ACTIVE,
                       tls_ciphers=constants.CIPHERS_OWASP_SUITE_B,
                       tls_versions=constants.TLS_VERSIONS_OWASP_SUITE_B,
-                      lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN):
+                      lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN,
+                      enabled=True):
     proto = 'HTTP' if proto is None else proto
     if not tls_enabled:
         tls_ciphers = None
@@ -818,7 +826,15 @@ def sample_pool_tuple(listener_id=None, proto=None, monitor=True,
                   'persistence_cookie': persistence_cookie}
         persis = sample_session_persistence_tuple(**kwargs)
     mon = None
-    if sample_pool == 1:
+    if sample_pool == 0:
+        id = 'sample_pool_id_0'
+        members = []
+        if monitor is True:
+            mon = sample_health_monitor_tuple(
+                proto=monitor_proto,
+                host_http_check=hm_host_http_check,
+                expected_codes=monitor_expected_codes)
+    elif sample_pool == 1:
         id = 'sample_pool_id_1'
         members = [sample_member_tuple('sample_member_id_1', '10.0.0.99',
                                        monitor_ip_port=monitor_ip_port),
@@ -847,7 +863,7 @@ def sample_pool_tuple(listener_id=None, proto=None, monitor=True,
         members=members,
         health_monitor=mon,
         session_persistence=persis if persistence is True else None,
-        enabled=True,
+        enabled=enabled,
         operating_status='ACTIVE', has_http_reuse=has_http_reuse,
         tls_certificate_id='pool_cont_1' if pool_cert else None,
         ca_tls_certificate_id='pool_ca_1' if pool_ca_cert else None,
diff --git a/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml b/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml
new file mode 100644
index 0000000000..5c668cbacd
--- /dev/null
+++ b/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Fix an incorrect ``operating_status`` with empty UDP pools. A UDP pool
+    without any member is now ``ONLINE`` instead of ``OFFLINE``.
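
For illustration only, and not part of the patch above: a minimal, self-contained Python sketch of the behaviour the release note describes, mirroring the fixed branch of get_udp_listener_pool_status() for a pool that exists but has no members. The UP constant, the empty_udp_pool_status() helper name, and the example mapping are stand-ins, not Octavia APIs.

# Illustrative sketch (assumed names), not part of the patch.
UP = 'UP'  # stand-in for octavia.common.constants.UP


def empty_udp_pool_status(resource_ipport_mapping):
    # After the fix, a member-less pool reports UP (operating_status ONLINE)
    # instead of DOWN (OFFLINE).
    if 'Members' not in resource_ipport_mapping:
        return {'lvs': {
            'uuid': resource_ipport_mapping['Pool']['id'],
            'status': UP,
            'members': {}
        }}


print(empty_udp_pool_status({'Pool': {'id': 'pool-1'}}))
# {'lvs': {'uuid': 'pool-1', 'status': 'UP', 'members': {}}}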