diff --git a/octavia/amphorae/backends/utils/keepalivedlvs_query.py b/octavia/amphorae/backends/utils/keepalivedlvs_query.py
index 09ab941646..75bbdf3385 100644
--- a/octavia/amphorae/backends/utils/keepalivedlvs_query.py
+++ b/octavia/amphorae/backends/utils/keepalivedlvs_query.py
@@ -233,7 +233,7 @@ def get_udp_listener_pool_status(listener_id):
     elif 'Members' not in resource_ipport_mapping:
         return {'lvs': {
             'uuid': resource_ipport_mapping['Pool']['id'],
-            'status': constants.DOWN,
+            'status': constants.UP,
             'members': {}
         }}

diff --git a/octavia/common/jinja/lvs/templates/macros.j2 b/octavia/common/jinja/lvs/templates/macros.j2
index 2c719c3cd4..4035381548 100644
--- a/octavia/common/jinja/lvs/templates/macros.j2
+++ b/octavia/common/jinja/lvs/templates/macros.j2
@@ -65,13 +65,7 @@ MISC_CHECK {
 {% endmacro %}

 {% macro virtualserver_macro(constants, listener, lb_vip_address, default_pool) %}
-{% set need_render = [] %}
-{% if default_pool and default_pool.enabled and default_pool.members %}
-    {% for member in default_pool.members %}
-        {% do need_render.append(member.enabled) %}
-    {% endfor %}
-{% endif %}
-{% if need_render|length > 0 %}
+{% if default_pool %}
 virtual_server {{ lb_vip_address }} {{ listener.protocol_port }} {
     {{ lb_algo_macro(default_pool) }}
     lb_kind NAT
@@ -93,7 +87,11 @@ virtual_server {{ lb_vip_address }} {{ listener.protocol_port }} {

     {{ health_monitor_vs_macro(default_pool) }}
 {% if default_pool.protocol.lower() == "udp" %}
+{% if default_pool.enabled %}
     # Configuration for Pool {{ default_pool.id }}
+{% else %}
+    # Pool {{ default_pool.id }} is disabled
+{% endif %}
 {% if default_pool.health_monitor and default_pool.health_monitor.enabled %}
     # Configuration for HealthMonitor {{ default_pool.health_monitor.id }}
 {% endif %}
diff --git a/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py b/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py
index 06f06f090d..291b5a7a6b 100644
--- a/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py
+++ b/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py
@@ -351,7 +351,7 @@ class LvsQueryTestCase(base.TestCase):
         # the returned resource_ipport_mapping doesn't contains the 'Members'
         # resources, that means the pool of listener doesn't have a enabled
         # pool resource, so the pool is not usable, then the pool status will
-        # return DOWN.
+        # return UP.
         mock_get_resource_ipports.return_value = (
             {
                 'Listener': {'id': self.listener_id_v4,
@@ -361,7 +361,7 @@ class LvsQueryTestCase(base.TestCase):
         res = lvs_query.get_udp_listener_pool_status(self.listener_id_v4)
         expected = {'lvs': {
             'uuid': self.pool_id_v4,
-            'status': constants.DOWN,
+            'status': constants.UP,
             'members': {}
         }}
         self.assertEqual(expected, res)
diff --git a/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py b/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py
index 35a9dbc8d9..1ac9c34f32 100644
--- a/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py
+++ b/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py
@@ -222,6 +222,50 @@ class TestLvsCfg(base.TestCase):
                 persistence=False, alloc_default_pool=False))
         self.assertEqual(exp, rendered_obj)

+    def test_render_template_udp_with_pool_no_member(self):
+        exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
+               "# Configuration for Listener sample_listener_id_1\n\n"
+               "net_namespace amphora-haproxy\n\n"
+               "virtual_server 10.0.0.2 80 {\n"
+               "    lb_algo rr\n"
+               "    lb_kind NAT\n"
+               "    protocol UDP\n\n\n"
+               "    # Configuration for Pool sample_pool_id_0\n"
+               "}\n\n")
+
+        rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj(
+            sample_configs_combined.sample_listener_tuple(
+                proto=constants.PROTOCOL_UDP, monitor=False,
+                persistence=False, alloc_default_pool=True,
+                sample_default_pool=0))
+        self.assertEqual(exp, rendered_obj)
+
+    def test_render_template_udp_with_disabled_pool(self):
+        exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
+               "# Configuration for Listener sample_listener_id_1\n\n"
+               "net_namespace amphora-haproxy\n\n"
+               "virtual_server 10.0.0.2 80 {\n"
+               "    lb_algo rr\n"
+               "    lb_kind NAT\n"
+               "    protocol UDP\n\n\n"
+               "    # Pool sample_pool_id_1 is disabled\n"
+               "    # Configuration for Member sample_member_id_1\n"
+               "    real_server 10.0.0.99 82 {\n"
+               "        weight 13\n\n"
+               "    }\n\n"
+               "    # Configuration for Member sample_member_id_2\n"
+               "    real_server 10.0.0.98 82 {\n"
+               "        weight 13\n\n"
+               "    }\n\n"
+               "}\n\n")
+
+        rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj(
+            sample_configs_combined.sample_listener_tuple(
+                proto=constants.PROTOCOL_UDP, monitor=False,
+                persistence=False, alloc_default_pool=True,
+                pool_enabled=False))
+        self.assertEqual(exp, rendered_obj)
+
     def test_udp_transform_session_persistence(self):
         persistence_src_ip = (
             sample_configs_combined.sample_session_persistence_tuple(
diff --git a/octavia/tests/unit/common/sample_configs/sample_configs_combined.py b/octavia/tests/unit/common/sample_configs/sample_configs_combined.py
index 23940001ac..5258d97efe 100644
--- a/octavia/tests/unit/common/sample_configs/sample_configs_combined.py
+++ b/octavia/tests/unit/common/sample_configs/sample_configs_combined.py
@@ -593,7 +593,9 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                           ssl_type_l7=False, pool_cert=False,
                           pool_ca_cert=False, pool_crl=False,
                           tls_enabled=False, hm_host_http_check=False,
-                          id='sample_listener_id_1', recursive_nest=False):
+                          id='sample_listener_id_1', recursive_nest=False,
+                          sample_default_pool=1,
+                          pool_enabled=True):
     proto = 'HTTP' if proto is None else proto
     if be_proto is None:
         be_proto = 'HTTP' if proto is 'TERMINATED_HTTPS' else proto
@@ -621,7 +623,8 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                 pool_cert=pool_cert, pool_ca_cert=pool_ca_cert,
                 pool_crl=pool_crl, tls_enabled=tls_enabled,
                 hm_host_http_check=hm_host_http_check,
-                listener_id='sample_listener_id_1'),
+                listener_id='sample_listener_id_1',
+                enabled=pool_enabled),
             sample_pool_tuple(
                 proto=be_proto, monitor=monitor, persistence=persistence,
                 persistence_type=persistence_type,
@@ -630,7 +633,8 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                 pool_cert=pool_cert, pool_ca_cert=pool_ca_cert,
                 pool_crl=pool_crl, tls_enabled=tls_enabled,
                 hm_host_http_check=hm_host_http_check,
-                listener_id='sample_listener_id_1')]
+                listener_id='sample_listener_id_1',
+                enabled=pool_enabled)]
         l7policies = [
             sample_l7policy_tuple('sample_l7policy_id_1', sample_policy=1),
             sample_l7policy_tuple('sample_l7policy_id_2', sample_policy=2),
@@ -653,7 +657,8 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                 pool_cert=pool_cert, pool_ca_cert=pool_ca_cert,
                 pool_crl=pool_crl, tls_enabled=tls_enabled,
                 hm_host_http_check=hm_host_http_check,
-                listener_id='sample_listener_id_1')]
+                listener_id='sample_listener_id_1',
+                enabled=pool_enabled)]
         l7policies = []
     listener = in_listener(
         id=id,
@@ -676,7 +681,9 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
             pool_ca_cert=pool_ca_cert,
             pool_crl=pool_crl,
             tls_enabled=tls_enabled,
-            hm_host_http_check=hm_host_http_check
+            hm_host_http_check=hm_host_http_check,
+            sample_pool=sample_default_pool,
+            enabled=pool_enabled
         ) if alloc_default_pool else '',
         connection_limit=connection_limit,
         tls_certificate_id='cont_id_1' if tls else '',
@@ -757,7 +764,8 @@ def sample_pool_tuple(listener_id=None, proto=None, monitor=True,
                       has_http_reuse=True, pool_cert=False,
                       pool_ca_cert=False, pool_crl=False,
                       tls_enabled=False, hm_host_http_check=False,
-                      provisioning_status=constants.ACTIVE):
+                      provisioning_status=constants.ACTIVE,
+                      enabled=True):
     proto = 'HTTP' if proto is None else proto
     monitor_proto = proto if monitor_proto is None else monitor_proto
     in_pool = collections.namedtuple(
@@ -776,7 +784,13 @@ def sample_pool_tuple(listener_id=None, proto=None, monitor=True,
                   'persistence_cookie': persistence_cookie}
         persis = sample_session_persistence_tuple(**kwargs)
     mon = None
-    if sample_pool == 1:
+    if sample_pool == 0:
+        id = 'sample_pool_id_0'
+        members = []
+        if monitor is True:
+            mon = sample_health_monitor_tuple(
+                proto=monitor_proto, host_http_check=hm_host_http_check)
+    elif sample_pool == 1:
         id = 'sample_pool_id_1'
         members = [sample_member_tuple('sample_member_id_1', '10.0.0.99',
                                        monitor_ip_port=monitor_ip_port),
@@ -802,7 +816,7 @@ def sample_pool_tuple(listener_id=None, proto=None, monitor=True,
         members=members,
         health_monitor=mon,
         session_persistence=persis if persistence is True else None,
-        enabled=True,
+        enabled=enabled,
         operating_status='ACTIVE', has_http_reuse=has_http_reuse,
         tls_certificate_id='pool_cont_1' if pool_cert else None,
         ca_tls_certificate_id='pool_ca_1' if pool_ca_cert else None,
diff --git a/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml b/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml
new file mode 100644
index 0000000000..5c668cbacd
--- /dev/null
+++ b/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Fix an incorrect ``operating_status`` with empty UDP pools. A UDP pool
+    without any member is now ``ONLINE`` instead of ``OFFLINE``.