Fix operating status for empty UDP pools

Fix the status of empty UDP pools: a UDP pool without members went
OFFLINE instead of ONLINE.

This commit changes the keepalived configuration generated for empty
pools: the configuration now contains a virtual_server block and a
comment about the existing pool. get_udp_listener_pool_status uses this
comment to detect that the pool exists and is not offline.
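
As an illustration of the detection side, here is a minimal sketch of
the comment-based check, assuming the rendered keepalived configuration
is available as plain text. The function and parameter names below are
illustrative only, not the actual Octavia implementation:

    import re

    def sketch_pool_status(config_text, pool_id, member_statuses):
        # The template now always emits "# Configuration for Pool <id>"
        # for an enabled pool, so its presence proves the pool exists.
        pattern = r'# Configuration for Pool %s' % re.escape(pool_id)
        if re.search(pattern, config_text) is None:
            # No trace of the pool in the config: disabled or absent.
            # ('DOWN'/'UP' stand in for constants.DOWN/constants.UP.)
            return {'lvs': {'uuid': pool_id, 'status': 'DOWN',
                            'members': {}}}
        # An empty member mapping no longer forces the status to DOWN.
        return {'lvs': {'uuid': pool_id, 'status': 'UP',
                        'members': member_statuses or {}}}
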
Story 2007984
Task 40610

Conflicts:
    octavia/tests/unit/common/sample_configs/sample_configs_combined.py

Change-Id: I30e23ca13d033d77c8ebdabbfdc7b54556a9466b
(cherry picked from commit 2954370e38)
@@ -221,7 +221,7 @@ def get_udp_listener_pool_status(listener_id):
     if 'Members' not in resource_ipport_mapping:
         return {'lvs': {
             'uuid': resource_ipport_mapping['Pool']['id'],
-            'status': constants.DOWN,
+            'status': constants.UP,
             'members': {}
         }}

@@ -65,13 +65,7 @@ MISC_CHECK {
 {% endmacro %}

 {% macro virtualserver_macro(constants, listener, lb_vip_address, default_pool) %}
-{% set need_render = [] %}
-{% if default_pool and default_pool.enabled and default_pool.members %}
-{% for member in default_pool.members %}
-{% do need_render.append(member.enabled) %}
-{% endfor %}
-{% endif %}
-{% if need_render|length > 0 %}
+{% if default_pool %}
 virtual_server {{ lb_vip_address }} {{ listener.protocol_port }} {
     {{ lb_algo_macro(default_pool) }}
     lb_kind NAT
@@ -93,7 +87,11 @@ virtual_server {{ lb_vip_address }} {{ listener.protocol_port }} {
     {{ health_monitor_vs_macro(default_pool) }}

     {% if default_pool.protocol.lower() == "udp" %}
+    {% if default_pool.enabled %}
     # Configuration for Pool {{ default_pool.id }}
+    {% else %}
+    # Pool {{ default_pool.id }} is disabled
+    {% endif %}
     {% if default_pool.health_monitor and default_pool.health_monitor.enabled %}
     # Configuration for HealthMonitor {{ default_pool.health_monitor.id }}
     {% endif %}
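
For an enabled pool with no members, the template changes above render
a configuration along the lines of the expectation in the new unit test
further down in this diff:

    # Configuration for Loadbalancer sample_loadbalancer_id_1
    # Configuration for Listener sample_listener_id_1

    net_namespace amphora-haproxy

    virtual_server 10.0.0.2 80 {
        lb_algo rr
        lb_kind NAT
        protocol UDP

        # Configuration for Pool sample_pool_id_0
    }
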
@@ -331,7 +331,7 @@ class LvsQueryTestCase(base.TestCase):
         # the returned resource_ipport_mapping doesn't contains the 'Members'
         # resources, that means the pool of listener doesn't have a enabled
         # pool resource, so the pool is not usable, then the pool status will
-        # return DOWN.
+        # return UP.
         mock_get_resource_ipports.return_value = (
             {
                 'Listener': {'id': self.listener_id_v4,
@@ -341,7 +341,7 @@ class LvsQueryTestCase(base.TestCase):
         res = lvs_query.get_udp_listener_pool_status(self.listener_id_v4)
         expected = {'lvs': {
             'uuid': self.pool_id_v4,
-            'status': constants.DOWN,
+            'status': constants.UP,
             'members': {}
         }}
         self.assertEqual(expected, res)
@@ -222,6 +222,50 @@ class TestLvsCfg(base.TestCase):
                 persistence=False, alloc_default_pool=False))
         self.assertEqual(exp, rendered_obj)

+    def test_render_template_udp_with_pool_no_member(self):
+        exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
+               "# Configuration for Listener sample_listener_id_1\n\n"
+               "net_namespace amphora-haproxy\n\n"
+               "virtual_server 10.0.0.2 80 {\n"
+               "    lb_algo rr\n"
+               "    lb_kind NAT\n"
+               "    protocol UDP\n\n\n"
+               "    # Configuration for Pool sample_pool_id_0\n"
+               "}\n\n")
+
+        rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj(
+            sample_configs_combined.sample_listener_tuple(
+                proto=constants.PROTOCOL_UDP, monitor=False,
+                persistence=False, alloc_default_pool=True,
+                sample_default_pool=0))
+        self.assertEqual(exp, rendered_obj)
+
+    def test_render_template_udp_with_disabled_pool(self):
+        exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
+               "# Configuration for Listener sample_listener_id_1\n\n"
+               "net_namespace amphora-haproxy\n\n"
+               "virtual_server 10.0.0.2 80 {\n"
+               "    lb_algo rr\n"
+               "    lb_kind NAT\n"
+               "    protocol UDP\n\n\n"
+               "    # Pool sample_pool_id_1 is disabled\n"
+               "    # Configuration for Member sample_member_id_1\n"
+               "    real_server 10.0.0.99 82 {\n"
+               "        weight 13\n\n"
+               "    }\n\n"
+               "    # Configuration for Member sample_member_id_2\n"
+               "    real_server 10.0.0.98 82 {\n"
+               "        weight 13\n\n"
+               "    }\n\n"
+               "}\n\n")
+
+        rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj(
+            sample_configs_combined.sample_listener_tuple(
+                proto=constants.PROTOCOL_UDP, monitor=False,
+                persistence=False, alloc_default_pool=True,
+                pool_enabled=False))
+        self.assertEqual(exp, rendered_obj)
+
     def test_udp_transform_session_persistence(self):
         persistence_src_ip = (
             sample_configs_combined.sample_session_persistence_tuple(
@@ -600,7 +600,9 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                           ssl_type_l7=False, pool_cert=False,
                           pool_ca_cert=False, pool_crl=False,
                           tls_enabled=False, hm_host_http_check=False,
-                          id='sample_listener_id_1', recursive_nest=False):
+                          id='sample_listener_id_1', recursive_nest=False,
+                          sample_default_pool=1,
+                          pool_enabled=True):
     proto = 'HTTP' if proto is None else proto
     if be_proto is None:
         be_proto = 'HTTP' if proto is 'TERMINATED_HTTPS' else proto
@@ -628,7 +630,8 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                 pool_cert=pool_cert, pool_ca_cert=pool_ca_cert,
                 pool_crl=pool_crl, tls_enabled=tls_enabled,
                 hm_host_http_check=hm_host_http_check,
-                listener_id='sample_listener_id_1'),
+                listener_id='sample_listener_id_1',
+                enabled=pool_enabled),
             sample_pool_tuple(
                 proto=be_proto, monitor=monitor, persistence=persistence,
                 persistence_type=persistence_type,
@@ -637,7 +640,8 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                 pool_cert=pool_cert, pool_ca_cert=pool_ca_cert,
                 pool_crl=pool_crl, tls_enabled=tls_enabled,
                 hm_host_http_check=hm_host_http_check,
-                listener_id='sample_listener_id_1')]
+                listener_id='sample_listener_id_1',
+                enabled=pool_enabled)]
         l7policies = [
             sample_l7policy_tuple('sample_l7policy_id_1', sample_policy=1),
             sample_l7policy_tuple('sample_l7policy_id_2', sample_policy=2),
@@ -660,7 +664,8 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
                 pool_cert=pool_cert, pool_ca_cert=pool_ca_cert,
                 pool_crl=pool_crl, tls_enabled=tls_enabled,
                 hm_host_http_check=hm_host_http_check,
-                listener_id='sample_listener_id_1')]
+                listener_id='sample_listener_id_1',
+                enabled=pool_enabled)]
         l7policies = []
     listener = in_listener(
         id=id,
@@ -683,7 +688,9 @@ def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
             pool_ca_cert=pool_ca_cert,
             pool_crl=pool_crl,
             tls_enabled=tls_enabled,
-            hm_host_http_check=hm_host_http_check
+            hm_host_http_check=hm_host_http_check,
+            sample_pool=sample_default_pool,
+            enabled=pool_enabled
         ) if alloc_default_pool else '',
         connection_limit=connection_limit,
         tls_certificate_id='cont_id_1' if tls else '',
@@ -764,7 +771,8 @@ def sample_pool_tuple(listener_id=None, proto=None, monitor=True,
                       has_http_reuse=True, pool_cert=False, pool_ca_cert=False,
                       pool_crl=False, tls_enabled=False,
                       hm_host_http_check=False,
-                      provisioning_status=constants.ACTIVE):
+                      provisioning_status=constants.ACTIVE,
+                      enabled=True):
     proto = 'HTTP' if proto is None else proto
     monitor_proto = proto if monitor_proto is None else monitor_proto
     in_pool = collections.namedtuple(
@@ -783,7 +791,13 @@ def sample_pool_tuple(listener_id=None, proto=None, monitor=True,
               'persistence_cookie': persistence_cookie}
     persis = sample_session_persistence_tuple(**kwargs)
     mon = None
-    if sample_pool == 1:
+    if sample_pool == 0:
+        id = 'sample_pool_id_0'
+        members = []
+        if monitor is True:
+            mon = sample_health_monitor_tuple(
+                proto=monitor_proto, host_http_check=hm_host_http_check)
+    elif sample_pool == 1:
         id = 'sample_pool_id_1'
         members = [sample_member_tuple('sample_member_id_1', '10.0.0.99',
                                        monitor_ip_port=monitor_ip_port),
@@ -809,7 +823,7 @@ def sample_pool_tuple(listener_id=None, proto=None, monitor=True,
         members=members,
         health_monitor=mon,
         session_persistence=persis if persistence is True else None,
-        enabled=True,
+        enabled=enabled,
         operating_status='ACTIVE', has_http_reuse=has_http_reuse,
         tls_certificate_id='pool_cont_1' if pool_cert else None,
         ca_tls_certificate_id='pool_ca_1' if pool_ca_cert else None,
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Fix an incorrect ``operating_status`` with empty UDP pools. A UDP pool
+    without any member is now ``ONLINE`` instead of ``OFFLINE``.