Fix persistence_granularity default value

So far we have assigned an IPv4 default value to persistence_granularity
when creating a UDP pool with session persistence.

This patch adds a condition for IPv6 and assigns the matching
granularity value.

Story 2009922
Task 44780

Change-Id: I21746515e4ceb0352b4e77708f58873e16d4cdfe
Omer 2022-10-03 10:24:12 +03:00
parent 95adbbe618
commit dd5d3cca6c
4 changed files with 47 additions and 5 deletions
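
For context: in the template change below, the default granularity stays 255.255.255.255 for IPv4 virtual servers and becomes 128 for IPv6 ones; 255.255.255.255 is a /32 host mask and 128 is the equivalent single-host prefix length. A quick standard-library check of that equivalence (illustrative only, not part of the change):

    # Illustrative only: 255.255.255.255 and 128 both describe per-client
    # (single-host) granularity for their respective address families.
    import ipaddress

    print(ipaddress.ip_network("0.0.0.0/255.255.255.255").prefixlen)  # 32
    print(ipaddress.ip_network("::/128").prefixlen)                   # 128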


@@ -133,8 +133,10 @@ virtual_server group ipv{{ ip_version }}-group {
 {% endif %}
 {% if default_pool.session_persistence.persistence_granularity %}
 persistence_granularity {{ default_pool.session_persistence.persistence_granularity }}
-{% else %}
+{% elif ip_version == 4 %}
 persistence_granularity 255.255.255.255
+{% else %}
+persistence_granularity 128
 {% endif %}
 {% endif %}
 {{ health_monitor_vs_macro(default_pool) }}

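A minimal standalone sketch of the branch logic added above, using an inline Jinja2 template; the fragment and variable names are simplified stand-ins for the real macro, so treat it as illustrative only:

    # Illustrative only: mimics the persistence_granularity default selection
    # added in the template hunk above, outside of Octavia.
    from jinja2 import Template

    FRAGMENT = (
        "{% if granularity %}persistence_granularity {{ granularity }}"
        "{% elif ip_version == 4 %}persistence_granularity 255.255.255.255"
        "{% else %}persistence_granularity 128"
        "{% endif %}"
    )

    tmpl = Template(FRAGMENT)
    print(tmpl.render(granularity=None, ip_version=4))  # persistence_granularity 255.255.255.255
    print(tmpl.render(granularity=None, ip_version=6))  # persistence_granularity 128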

@@ -86,6 +86,40 @@ class TestLvsCfg(base.TestCase):
             connection_limit=98))
         self.assertEqual(exp, rendered_obj)
 
+    def test_render_template_udp_ipv6_session_persistence_default_values(self):
+        # The session persistence default values refer to
+        # persistence_timeout and persistence_granularity
+        exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
+               "# Configuration for Listener sample_listener_id_1\n\n"
+               "net_namespace amphora-haproxy\n\n"
+               "virtual_server_group ipv6-group {\n"
+               " 2001:db8::2 80\n"
+               "}\n\n"
+               "virtual_server group ipv6-group {\n"
+               " lb_algo wrr\n"
+               " lb_kind NAT\n"
+               " protocol UDP\n"
+               " persistence_timeout 360\n"
+               " persistence_granularity 128\n"
+               " delay_loop 30\n"
+               " delay_before_retry 30\n"
+               " retry 3\n\n\n"
+               " # Configuration for Pool sample_pool_id_1\n"
+               " # Configuration for HealthMonitor sample_monitor_id_1\n"
+               "}\n\n")
+        udp_sample = sample_configs_combined.sample_lb_with_udp_listener_tuple(
+            listeners=[sample_configs_combined.sample_listener_tuple(
+                proto=constants.PROTOCOL_UDP,
+                persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP,
+                monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT)]
+        )
+        udp_listener = udp_sample.listeners[0]
+        ipv6_lb = sample_configs_combined.sample_listener_loadbalancer_tuple(
+            vip=sample_configs_combined.sample_vip_tuple('2001:db8::2'))
+        udp_listener = udp_listener._replace(load_balancer=ipv6_lb)
+        rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj(udp_listener)
+        self.assertEqual(exp, rendered_obj)
+
     def test_render_template_udp_one_packet(self):
         exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
                "# Configuration for Listener sample_listener_id_1\n\n"

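The new test builds its IPv6 fixture by swapping the load balancer on a sample UDP listener via namedtuple._replace. A small self-contained sketch of that pattern (the type and field names here are made up for illustration, not Octavia's):

    # Illustrative only: namedtuple._replace returns a copy with selected
    # fields swapped, which is how the test injects an IPv6 load balancer.
    from collections import namedtuple

    LoadBalancer = namedtuple("LoadBalancer", ["vip"])
    Listener = namedtuple("Listener", ["protocol", "load_balancer"])

    listener = Listener(protocol="UDP", load_balancer=LoadBalancer(vip="10.0.0.2"))
    ipv6_listener = listener._replace(load_balancer=LoadBalancer(vip="2001:db8::2"))

    print(ipv6_listener.load_balancer.vip)  # 2001:db8::2
    print(listener.load_balancer.vip)       # 10.0.0.2 (original is unchanged)

Because _replace returns a new tuple rather than mutating the fixture in place, the shared sample objects stay reusable across tests.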

@@ -586,7 +586,8 @@ RET_SCTP_LISTENER = {
 def sample_listener_loadbalancer_tuple(
-        topology=None, enabled=True, pools=None, additional_vips=False):
+        topology=None, enabled=True, vip=None, pools=None,
+        additional_vips=False):
     if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']:
         more_amp = True
     else:
@@ -598,7 +599,7 @@ def sample_listener_loadbalancer_tuple(
     return in_lb(
         id='sample_loadbalancer_id_1',
         name='test-lb',
-        vip=sample_vip_tuple(),
+        vip=vip or sample_vip_tuple(),
         amphorae=[sample_amphora_tuple(role=constants.ROLE_MASTER),
                   sample_amphora_tuple(
                       id='sample_amphora_id_2',
@@ -618,13 +619,13 @@ def sample_listener_loadbalancer_tuple(
 def sample_lb_with_udp_listener_tuple(
-        topology=None, enabled=True, pools=None):
+        topology=None, enabled=True, listeners=None, pools=None):
     if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']:
         more_amp = True
     else:
         more_amp = False
         topology = constants.TOPOLOGY_SINGLE
-    listeners = [sample_listener_tuple(
+    listeners = listeners or [sample_listener_tuple(
         proto=constants.PROTOCOL_UDP,
         persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP,
         persistence_timeout=33,

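The helper changes above follow a common pattern for extending test fixtures: new keyword arguments default to None and fall back to the previous hard-coded values via `or`, so existing callers keep their old behaviour. A tiny sketch of the idea (the function name and values are illustrative, not the real helpers):

    # Illustrative only: optional keyword arguments that preserve the old
    # defaults unless a caller overrides them, as done for vip/listeners above.
    def sample_lb(vip=None, listeners=None):
        vip = vip or "10.0.0.2"                    # previous fixed default
        listeners = listeners or ["udp-listener"]  # previous fixed default
        return {"vip": vip, "listeners": listeners}

    print(sample_lb())                           # old behaviour preserved
    print(sample_lb(vip="2001:db8::2"))          # new callers can inject IPv6

One caveat of the `or` fallback: an explicitly passed falsy value, such as an empty list, also falls back to the default.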

@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Modified the default Keepalived LVS persistence granularity
+    configuration value so that it is IPv6 compatible.