From dd5d3cca6cd7ec27cd388d7ce4939c9c38af8c83 Mon Sep 17 00:00:00 2001 From: Omer Date: Mon, 3 Oct 2022 10:24:12 +0300 Subject: [PATCH] Fix persistence_granularity default value So far we assigned an ipv4 default value to persistence_granularity when we created a UDP pool with session persistence. This patch adds the condition of ipv6 and assigns its matching netmask value. Story 2009922 Task 44780 Change-Id: I21746515e4ceb0352b4e77708f58873e16d4cdfe --- octavia/common/jinja/lvs/templates/macros.j2 | 4 ++- .../unit/common/jinja/lvs/test_jinja_cfg.py | 34 +++++++++++++++++++ .../sample_configs/sample_configs_combined.py | 9 ++--- ...larity-default-value-540093bbf6518ed8.yaml | 5 +++ 4 files changed, 47 insertions(+), 5 deletions(-) create mode 100644 releasenotes/notes/fix-persistence-granularity-default-value-540093bbf6518ed8.yaml diff --git a/octavia/common/jinja/lvs/templates/macros.j2 b/octavia/common/jinja/lvs/templates/macros.j2 index b8b7a5ee27..f31008e701 100644 --- a/octavia/common/jinja/lvs/templates/macros.j2 +++ b/octavia/common/jinja/lvs/templates/macros.j2 @@ -133,8 +133,10 @@ virtual_server group ipv{{ ip_version }}-group { {% endif %} {% if default_pool.session_persistence.persistence_granularity %} persistence_granularity {{ default_pool.session_persistence.persistence_granularity }} - {% else %} + {% elif ip_version == 4 %} persistence_granularity 255.255.255.255 + {% else %} + persistence_granularity 128 {% endif %} {% endif %} {{ health_monitor_vs_macro(default_pool) }} diff --git a/octavia/tests/unit/common/jinja/lvs/test_jinja_cfg.py b/octavia/tests/unit/common/jinja/lvs/test_jinja_cfg.py index 11bc836aad..e43e6e818d 100644 --- a/octavia/tests/unit/common/jinja/lvs/test_jinja_cfg.py +++ b/octavia/tests/unit/common/jinja/lvs/test_jinja_cfg.py @@ -86,6 +86,40 @@ class TestLvsCfg(base.TestCase): connection_limit=98)) self.assertEqual(exp, rendered_obj) + def test_render_template_udp_ipv6_session_persistence_default_values(self): + # The session 
persistence default values refer to + # persistence_timeout and persistence_granularity + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv6-group {\n" + " 2001:db8::2 80\n" + "}\n\n" + "virtual_server group ipv6-group {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol UDP\n" + " persistence_timeout 360\n" + " persistence_granularity 128\n" + " delay_loop 30\n" + " delay_before_retry 30\n" + " retry 3\n\n\n" + " # Configuration for Pool sample_pool_id_1\n" + " # Configuration for HealthMonitor sample_monitor_id_1\n" + "}\n\n") + udp_sample = sample_configs_combined.sample_lb_with_udp_listener_tuple( + listeners=[sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT)] + ) + udp_listener = udp_sample.listeners[0] + ipv6_lb = sample_configs_combined.sample_listener_loadbalancer_tuple( + vip=sample_configs_combined.sample_vip_tuple('2001:db8::2')) + udp_listener = udp_listener._replace(load_balancer=ipv6_lb) + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj(udp_listener) + self.assertEqual(exp, rendered_obj) + def test_render_template_udp_one_packet(self): exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" "# Configuration for Listener sample_listener_id_1\n\n" diff --git a/octavia/tests/unit/common/sample_configs/sample_configs_combined.py b/octavia/tests/unit/common/sample_configs/sample_configs_combined.py index c01e26da72..90d2c4413a 100644 --- a/octavia/tests/unit/common/sample_configs/sample_configs_combined.py +++ b/octavia/tests/unit/common/sample_configs/sample_configs_combined.py @@ -586,7 +586,8 @@ RET_SCTP_LISTENER = { def sample_listener_loadbalancer_tuple( - topology=None, enabled=True, pools=None, additional_vips=False): + topology=None, 
enabled=True, vip=None, pools=None, + additional_vips=False): if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']: more_amp = True else: @@ -598,7 +599,7 @@ def sample_listener_loadbalancer_tuple( return in_lb( id='sample_loadbalancer_id_1', name='test-lb', - vip=sample_vip_tuple(), + vip=vip or sample_vip_tuple(), amphorae=[sample_amphora_tuple(role=constants.ROLE_MASTER), sample_amphora_tuple( id='sample_amphora_id_2', @@ -618,13 +619,13 @@ def sample_listener_loadbalancer_tuple( def sample_lb_with_udp_listener_tuple( - topology=None, enabled=True, pools=None): + topology=None, enabled=True, listeners=None, pools=None): if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']: more_amp = True else: more_amp = False topology = constants.TOPOLOGY_SINGLE - listeners = [sample_listener_tuple( + listeners = listeners or [sample_listener_tuple( proto=constants.PROTOCOL_UDP, persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, persistence_timeout=33, diff --git a/releasenotes/notes/fix-persistence-granularity-default-value-540093bbf6518ed8.yaml b/releasenotes/notes/fix-persistence-granularity-default-value-540093bbf6518ed8.yaml new file mode 100644 index 0000000000..89298ddb4b --- /dev/null +++ b/releasenotes/notes/fix-persistence-granularity-default-value-540093bbf6518ed8.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Modified default Keepalived LVS persistence granularity + configuration value so it would be ipv6 compatible.