Fix persistence_granularity default value
So far we assigned an IPv4 default value to persistence_granularity when we created a UDP pool with session persistence. This patch adds the condition of IPv6 and assigns its matching netmask value. Story 2009922 Task 44780 Change-Id: I21746515e4ceb0352b4e77708f58873e16d4cdfe (cherry picked from commit dd5d3cca6c) (cherry picked from commit 3a1e35da43) (cherry picked from commit 195394d9c2)
This commit is contained in:
@@ -117,8 +117,10 @@ virtual_server {{ lb_vip_address }} {{ listener.protocol_port }} {
|
|||||||
{% endif %}
|
{% endif %}
|
||||||
{% if default_pool.session_persistence.persistence_granularity %}
|
{% if default_pool.session_persistence.persistence_granularity %}
|
||||||
persistence_granularity {{ default_pool.session_persistence.persistence_granularity }}
|
persistence_granularity {{ default_pool.session_persistence.persistence_granularity }}
|
||||||
{% else %}
|
{% elif ip_version == 4 %}
|
||||||
persistence_granularity 255.255.255.255
|
persistence_granularity 255.255.255.255
|
||||||
|
{% else %}
|
||||||
|
persistence_granularity 128
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{{ health_monitor_vs_macro(default_pool) }}
|
{{ health_monitor_vs_macro(default_pool) }}
|
||||||
|
|||||||
@@ -83,6 +83,58 @@ class TestLvsCfg(base.TestCase):
|
|||||||
connection_limit=98))
|
connection_limit=98))
|
||||||
self.assertEqual(exp, rendered_obj)
|
self.assertEqual(exp, rendered_obj)
|
||||||
|
|
||||||
|
def test_render_template_udp_ipv6_session_persistence_default_values(self):
|
||||||
|
# The session persistence default values refer to
|
||||||
|
# persistence_timeout and persistence_granularity
|
||||||
|
exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
|
||||||
|
"# Configuration for Listener sample_listener_id_1\n\n"
|
||||||
|
"net_namespace amphora-haproxy\n\n"
|
||||||
|
"virtual_server 2001:db8::2 80 {\n"
|
||||||
|
" lb_algo wrr\n"
|
||||||
|
" lb_kind NAT\n"
|
||||||
|
" protocol UDP\n"
|
||||||
|
" persistence_timeout 360\n"
|
||||||
|
" persistence_granularity 128\n"
|
||||||
|
" delay_loop 30\n"
|
||||||
|
" delay_before_retry 30\n"
|
||||||
|
" retry 3\n\n\n"
|
||||||
|
" # Configuration for Pool sample_pool_id_1\n"
|
||||||
|
" # Configuration for HealthMonitor sample_monitor_id_1\n"
|
||||||
|
" # Configuration for Member sample_member_id_1\n"
|
||||||
|
" real_server 10.0.0.99 82 {\n"
|
||||||
|
" weight 13\n"
|
||||||
|
" uthreshold 98\n"
|
||||||
|
" MISC_CHECK {\n"
|
||||||
|
" misc_path \"/var/lib/octavia/lvs/check/"
|
||||||
|
"udp_check.sh 10.0.0.99 82\"\n"
|
||||||
|
" misc_timeout 31\n"
|
||||||
|
" }\n"
|
||||||
|
" }\n\n"
|
||||||
|
" # Configuration for Member sample_member_id_2\n"
|
||||||
|
" real_server 10.0.0.98 82 {\n"
|
||||||
|
" weight 13\n"
|
||||||
|
" uthreshold 98\n"
|
||||||
|
" MISC_CHECK {\n"
|
||||||
|
" misc_path \"/var/lib/octavia/lvs/check/"
|
||||||
|
"udp_check.sh 10.0.0.98 82\"\n"
|
||||||
|
" misc_timeout 31\n"
|
||||||
|
" }\n"
|
||||||
|
" }\n\n"
|
||||||
|
"}\n\n")
|
||||||
|
udp_sample = sample_configs_combined.sample_lb_with_udp_listener_tuple(
|
||||||
|
listeners=[sample_configs_combined.sample_listener_tuple(
|
||||||
|
proto=constants.PROTOCOL_UDP,
|
||||||
|
persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP,
|
||||||
|
monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT,
|
||||||
|
connection_limit=98)]
|
||||||
|
)
|
||||||
|
udp_listener = udp_sample.listeners[0]
|
||||||
|
ipv6_lb = sample_configs_combined.sample_listener_loadbalancer_tuple(
|
||||||
|
vip=sample_configs_combined.sample_vip_tuple('2001:db8::2'))
|
||||||
|
udp_listener = udp_listener._replace(load_balancer=ipv6_lb)
|
||||||
|
rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj(udp_listener)
|
||||||
|
self.assertEqual(exp, rendered_obj)
|
||||||
|
|
||||||
def test_render_template_udp_one_packet(self):
|
def test_render_template_udp_one_packet(self):
|
||||||
exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
|
exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
|
||||||
"# Configuration for Listener sample_listener_id_1\n\n"
|
"# Configuration for Listener sample_listener_id_1\n\n"
|
||||||
|
|||||||
@@ -574,7 +574,7 @@ RET_SCTP_LISTENER = {
|
|||||||
|
|
||||||
|
|
||||||
def sample_listener_loadbalancer_tuple(
|
def sample_listener_loadbalancer_tuple(
|
||||||
topology=None, enabled=True, pools=None):
|
topology=None, enabled=True, vip=None, pools=None):
|
||||||
if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']:
|
if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']:
|
||||||
more_amp = True
|
more_amp = True
|
||||||
else:
|
else:
|
||||||
@@ -586,7 +586,7 @@ def sample_listener_loadbalancer_tuple(
|
|||||||
return in_lb(
|
return in_lb(
|
||||||
id='sample_loadbalancer_id_1',
|
id='sample_loadbalancer_id_1',
|
||||||
name='test-lb',
|
name='test-lb',
|
||||||
vip=sample_vip_tuple(),
|
vip=vip or sample_vip_tuple(),
|
||||||
amphorae=[sample_amphora_tuple(role=constants.ROLE_MASTER),
|
amphorae=[sample_amphora_tuple(role=constants.ROLE_MASTER),
|
||||||
sample_amphora_tuple(
|
sample_amphora_tuple(
|
||||||
id='sample_amphora_id_2',
|
id='sample_amphora_id_2',
|
||||||
@@ -603,13 +603,13 @@ def sample_listener_loadbalancer_tuple(
|
|||||||
|
|
||||||
|
|
||||||
def sample_lb_with_udp_listener_tuple(
|
def sample_lb_with_udp_listener_tuple(
|
||||||
topology=None, enabled=True, pools=None):
|
topology=None, enabled=True, listeners=None, pools=None):
|
||||||
if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']:
|
if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']:
|
||||||
more_amp = True
|
more_amp = True
|
||||||
else:
|
else:
|
||||||
more_amp = False
|
more_amp = False
|
||||||
topology = constants.TOPOLOGY_SINGLE
|
topology = constants.TOPOLOGY_SINGLE
|
||||||
listeners = [sample_listener_tuple(
|
listeners = listeners or [sample_listener_tuple(
|
||||||
proto=constants.PROTOCOL_UDP,
|
proto=constants.PROTOCOL_UDP,
|
||||||
persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP,
|
persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP,
|
||||||
persistence_timeout=33,
|
persistence_timeout=33,
|
||||||
|
|||||||
@@ -0,0 +1,5 @@
|
|||||||
|
---
|
||||||
|
fixes:
|
||||||
|
- |
|
||||||
|
Modified the default Keepalived LVS persistence granularity
|
||||||
|
configuration value so that it is IPv6 compatible.
|
||||||
Reference in New Issue
Block a user