diff --git a/octavia/amphorae/drivers/haproxy/rest_api_driver.py b/octavia/amphorae/drivers/haproxy/rest_api_driver.py
index dc287337f2..d903ea0e33 100644
--- a/octavia/amphorae/drivers/haproxy/rest_api_driver.py
+++ b/octavia/amphorae/drivers/haproxy/rest_api_driver.py
@@ -62,6 +62,18 @@ class HaproxyAmphoraLoadBalancerDriver(
             connection_logging=CONF.haproxy_amphora.connection_logging)
         self.udp_jinja = jinja_udp_cfg.LvsJinjaTemplater()

+    def _get_haproxy_versions(self, amphora):
+        """Get major and minor version number from haproxy
+
+        Example: ['1', '6']
+
+        :returns version_list: A list with the major and minor numbers
+        """
+
+        version_string = self.client.get_info(amphora)['haproxy_version']
+
+        return version_string.split('.')[:2]
+
     def update_amphora_listeners(self, listeners, amphora_index,
                                  amphorae, timeout_dict=None):
         """Update the amphora with a new configuration.
@@ -87,6 +99,9 @@ class HaproxyAmphoraLoadBalancerDriver(
         amp = amphorae[amphora_index]
         if amp is None or amp.status == consts.DELETED:
             return
+
+        haproxy_versions = self._get_haproxy_versions(amp)
+
         # TODO(johnsom) remove when we don't have a process per listener
         for listener in listeners:
             LOG.debug("%s updating listener %s on amphora %s",
@@ -102,9 +117,9 @@ class HaproxyAmphoraLoadBalancerDriver(
             certs = self._process_tls_certificates(listener)
             # Generate HaProxy configuration from listener object
             config = self.jinja.build_config(
-                host_amphora=amp,
-                listener=listener,
-                tls_cert=certs['tls_cert'])
+                host_amphora=amp, listener=listener,
+                tls_cert=certs['tls_cert'],
+                haproxy_versions=haproxy_versions)
             self.client.upload_config(amp, listener.id, config,
                                       timeout_dict=timeout_dict)
             self.client.reload_listener(amp, listener.id,
@@ -137,11 +152,14 @@ class HaproxyAmphoraLoadBalancerDriver(

         for amp in listener.load_balancer.amphorae:
             if amp.status != consts.DELETED:
+
+                haproxy_versions = self._get_haproxy_versions(amp)
+
                 # Generate HaProxy configuration from listener object
                 config = self.jinja.build_config(
-                    host_amphora=amp,
-                    listener=listener,
-                    tls_cert=certs['tls_cert'])
+                    host_amphora=amp, listener=listener,
+                    tls_cert=certs['tls_cert'],
+                    haproxy_versions=haproxy_versions)
                 self.client.upload_config(amp, listener.id, config)
                 self.client.reload_listener(amp, listener.id)

diff --git a/octavia/common/constants.py b/octavia/common/constants.py
index 627d01e401..9501ce848e 100644
--- a/octavia/common/constants.py
+++ b/octavia/common/constants.py
@@ -329,7 +329,6 @@ AMP_LISTENER_UPDATE = 'octavia-amp-listeners-update'
 GENERATE_SERVER_PEM_TASK = 'GenerateServerPEMTask'

 # Batch Member Update constants
-MEMBERS = 'members'
 UNORDERED_MEMBER_UPDATES_FLOW = 'octavia-unordered-member-updates-flow'
 UNORDERED_MEMBER_ACTIVE_FLOW = 'octavia-unordered-member-active-flow'
 UPDATE_ATTRIBUTES_FLOW = 'octavia-update-attributes-flow'
@@ -553,3 +552,6 @@ ENABLE = 'enable'

 # systemd amphora netns service prefix
 AMP_NETNS_SVC_PREFIX = 'amphora-netns'
+
+# Amphora Feature Compatibility
+HTTP_REUSE = 'has_http_reuse'
diff --git a/octavia/common/jinja/haproxy/jinja_cfg.py b/octavia/common/jinja/haproxy/jinja_cfg.py
index a988a4577b..de2a2664e9 100644
--- a/octavia/common/jinja/haproxy/jinja_cfg.py
+++ b/octavia/common/jinja/haproxy/jinja_cfg.py
@@ -78,7 +78,7 @@ class JinjaTemplater(object):
         self.connection_logging = connection_logging

     def build_config(self, host_amphora, listener, tls_cert,
-                     socket_path=None):
+                     haproxy_versions, socket_path=None):
         """Convert a logical configuration to the HAProxy version

         :param host_amphora: The Amphora this configuration is hosted on
@@ -87,9 +87,19 @@ class JinjaTemplater(object):
         :param socket_path: The socket path for Haproxy process
         :return: Rendered configuration
         """
-        return self.render_loadbalancer_obj(host_amphora, listener,
-                                            tls_cert=tls_cert,
-                                            socket_path=socket_path)
+
+        # Check for any backward compatibility items we need to check
+        # This is done here for upgrade scenarios where one amp in a
+        # pair might be running an older amphora version.
+
+        feature_compatibility = {}
+        # Is it newer than haproxy 1.5?
+        if not (int(haproxy_versions[0]) < 2 and int(haproxy_versions[1]) < 6):
+            feature_compatibility[constants.HTTP_REUSE] = True
+
+        return self.render_loadbalancer_obj(
+            host_amphora, listener, tls_cert=tls_cert, socket_path=socket_path,
+            feature_compatibility=feature_compatibility)

     def _get_template(self):
         """Returns the specified Jinja configuration template."""
@@ -106,8 +116,8 @@ class JinjaTemplater(object):
         return JINJA_ENV.get_template(os.path.basename(self.haproxy_template))

     def render_loadbalancer_obj(self, host_amphora, listener,
-                                tls_cert=None,
-                                socket_path=None):
+                                tls_cert=None, socket_path=None,
+                                feature_compatibility=None):
         """Renders a templated configuration from a load balancer object

         :param host_amphora: The Amphora this configuration is hosted on
@@ -116,11 +126,13 @@ class JinjaTemplater(object):
         :param socket_path: The socket path for Haproxy process
         :return: Rendered configuration
         """
+        feature_compatibility = feature_compatibility or {}
         loadbalancer = self._transform_loadbalancer(
             host_amphora,
             listener.load_balancer,
             listener,
-            tls_cert)
+            tls_cert,
+            feature_compatibility)
         if not socket_path:
             socket_path = '%s/%s.sock' % (self.base_amp_path, listener.id)
         return self._get_template().render(
@@ -132,19 +144,21 @@ class JinjaTemplater(object):
             constants=constants)

     def _transform_loadbalancer(self, host_amphora, loadbalancer, listener,
-                                tls_cert):
+                                tls_cert, feature_compatibility):
         """Transforms a load balancer into an object that will be processed by

         the templating system
         """
-        t_listener = self._transform_listener(listener, tls_cert)
+        t_listener = self._transform_listener(
+            listener, tls_cert, feature_compatibility)
         ret_value = {
             'id': loadbalancer.id,
             'vip_address': loadbalancer.vip.ip_address,
             'listener': t_listener,
             'topology': loadbalancer.topology,
             'enabled': loadbalancer.enabled,
-            'host_amphora': self._transform_amphora(host_amphora)
+            'host_amphora': self._transform_amphora(
+                host_amphora, feature_compatibility)
         }
         # NOTE(sbalukoff): Global connection limit should be a sum of all
         # listeners' connection limits. Since Octavia presently supports
@@ -157,7 +171,7 @@ class JinjaTemplater(object):
                 constants.HAPROXY_MAX_MAXCONN)
         return ret_value

-    def _transform_amphora(self, amphora):
+    def _transform_amphora(self, amphora, feature_compatibility):
         """Transform an amphora into an object that will be processed by

         the templating system.
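Note on the version gate in build_config() above: constants.HTTP_REUSE is only set when the amphora reports haproxy 1.6 or newer, since the http-reuse directive first appeared in haproxy 1.6. The sketch below is illustration only, not part of the patch (the helper name has_http_reuse is hypothetical); it shows how the major/minor list returned by _get_haproxy_versions() feeds that check:

    # Illustration only: mirrors _get_haproxy_versions() plus the gate added
    # to build_config(); the helper name is hypothetical.
    def has_http_reuse(version_string):
        # '1.6.3-1ubuntu0.1' -> ['1', '6'], exactly like _get_haproxy_versions()
        major, minor = version_string.split('.')[:2]
        # Anything older than 1.6 (major < 2 and minor < 6) lacks http-reuse.
        return not (int(major) < 2 and int(minor) < 6)

    assert has_http_reuse('1.5.18') is False
    assert has_http_reuse('1.6.3-1ubuntu0.1') is True
    assert has_http_reuse('1.8.1') is True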
@@ -175,7 +189,7 @@ class JinjaTemplater(object):
             'vrrp_priority': amphora.vrrp_priority
         }

-    def _transform_listener(self, listener, tls_cert):
+    def _transform_listener(self, listener, tls_cert, feature_compatibility):
         """Transforms a listener into an object that will be processed by

         the templating system
@@ -213,14 +227,16 @@ class JinjaTemplater(object):
             ret_value['crt_dir'] = os.path.join(self.base_crt_dir, listener.id)
         if listener.default_pool:
             ret_value['default_pool'] = self._transform_pool(
-                listener.default_pool)
-        pools = [self._transform_pool(x) for x in listener.pools]
+                listener.default_pool, feature_compatibility)
+        pools = [self._transform_pool(x, feature_compatibility)
+                 for x in listener.pools]
         ret_value['pools'] = pools
-        l7policies = [self._transform_l7policy(x) for x in listener.l7policies]
+        l7policies = [self._transform_l7policy(x, feature_compatibility)
+                      for x in listener.l7policies]
         ret_value['l7policies'] = l7policies
         return ret_value

-    def _transform_pool(self, pool):
+    def _transform_pool(self, pool, feature_compatibility):
         """Transforms a pool into an object that will be processed by

         the templating system
@@ -234,21 +250,24 @@ class JinjaTemplater(object):
             'session_persistence': '',
             'enabled': pool.enabled,
             'operating_status': pool.operating_status,
-            'stick_size': CONF.haproxy_amphora.haproxy_stick_size
+            'stick_size': CONF.haproxy_amphora.haproxy_stick_size,
+            constants.HTTP_REUSE: feature_compatibility.get(
+                constants.HTTP_REUSE, False)
         }
-        members = [self._transform_member(x) for x in pool.members]
+        members = [self._transform_member(x, feature_compatibility)
+                   for x in pool.members]
         ret_value['members'] = members
         if pool.health_monitor:
             ret_value['health_monitor'] = self._transform_health_monitor(
-                pool.health_monitor)
+                pool.health_monitor, feature_compatibility)
         if pool.session_persistence:
             ret_value[
                 'session_persistence'] = self._transform_session_persistence(
-                    pool.session_persistence)
+                    pool.session_persistence, feature_compatibility)
         return ret_value

     @staticmethod
-    def _transform_session_persistence(persistence):
+    def _transform_session_persistence(persistence, feature_compatibility):
         """Transforms session persistence into an object that will be

         processed by the templating system
@@ -259,7 +278,7 @@ class JinjaTemplater(object):
         }

     @staticmethod
-    def _transform_member(member):
+    def _transform_member(member, feature_compatibility):
         """Transforms a member into an object that will be processed by the

         templating system
@@ -277,7 +296,7 @@ class JinjaTemplater(object):
             'backup': member.backup
         }

-    def _transform_health_monitor(self, monitor):
+    def _transform_health_monitor(self, monitor, feature_compatibility):
         """Transforms a health monitor into an object that will be processed

         by the templating system
@@ -299,7 +318,7 @@ class JinjaTemplater(object):
             'enabled': monitor.enabled,
         }

-    def _transform_l7policy(self, l7policy):
+    def _transform_l7policy(self, l7policy, feature_compatibility):
         """Transforms an L7 policy into an object that will be processed by

         the templating system
@@ -312,15 +331,15 @@ class JinjaTemplater(object):
         }
         if l7policy.redirect_pool:
             ret_value['redirect_pool'] = self._transform_pool(
-                l7policy.redirect_pool)
+                l7policy.redirect_pool, feature_compatibility)
         else:
             ret_value['redirect_pool'] = None
-        l7rules = [self._transform_l7rule(x) for x in l7policy.l7rules
-                   if x.enabled]
+        l7rules = [self._transform_l7rule(x, feature_compatibility)
+                   for x in l7policy.l7rules if x.enabled]
         ret_value['l7rules'] = l7rules
         return ret_value

-    def _transform_l7rule(self, l7rule):
+    def _transform_l7rule(self, l7rule, feature_compatibility):
         """Transforms an L7 rule into an object that will be processed by

         the templating system
diff --git a/octavia/common/jinja/haproxy/templates/macros.j2 b/octavia/common/jinja/haproxy/templates/macros.j2
index f74954609a..033a2fca05 100644
--- a/octavia/common/jinja/haproxy/templates/macros.j2
+++ b/octavia/common/jinja/haproxy/templates/macros.j2
@@ -203,7 +203,8 @@ backend {{ pool.id }}
     {% else %}
     mode {{ pool.protocol }}
     {% endif %}
-    {% if (pool.protocol.lower() == constants.PROTOCOL_HTTP.lower() or
+    {% if pool.get(constants.HTTP_REUSE, False) and (
+        pool.protocol.lower() == constants.PROTOCOL_HTTP.lower() or
         (pool.protocol.lower() == constants.PROTOCOL_PROXY.lower() and
          listener.protocol_mode.lower() == constants.PROTOCOL_HTTP.lower()))%}
diff --git a/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py b/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py
index a79de0cf21..300e88766f 100644
--- a/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py
+++ b/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py
@@ -367,6 +367,14 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
         self.driver.client.get_interface.assert_called_once_with(
             self.amp, self.amp.vrrp_ip, timeout_dict=None)

+    def test_get_haproxy_versions(self):
+        ref_versions = ['1', '6']
+        self.driver.client.get_info.return_value = {
+            'haproxy_version': u'1.6.3-1ubuntu0.1'}
+        result = self.driver._get_haproxy_versions(self.amp)
+        self.driver.client.get_info.assert_called_once_with(self.amp)
+        self.assertEqual(ref_versions, result)
+


 class TestAmphoraAPIClientTest(base.TestCase):
diff --git a/octavia/tests/unit/common/jinja/haproxy/test_jinja_cfg.py b/octavia/tests/unit/common/jinja/haproxy/test_jinja_cfg.py
index 37c3e41ed8..469821d0ea 100644
--- a/octavia/tests/unit/common/jinja/haproxy/test_jinja_cfg.py
+++ b/octavia/tests/unit/common/jinja/haproxy/test_jinja_cfg.py
@@ -45,7 +45,6 @@ class TestHaproxyCfg(base.TestCase):
             maxconn=constants.HAPROXY_MAX_MAXCONN)
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    cookie SRV insert indirect nocache\n"
               "    timeout check 31s\n"
@@ -90,7 +89,6 @@ class TestHaproxyCfg(base.TestCase):
             maxconn=constants.HAPROXY_MAX_MAXCONN)
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    cookie SRV insert indirect nocache\n"
               "    timeout check 31s\n"
@@ -124,7 +122,6 @@ class TestHaproxyCfg(base.TestCase):
     def test_render_template_http(self):
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    cookie SRV insert indirect nocache\n"
               "    timeout check 31s\n"
@@ -151,7 +148,6 @@ class TestHaproxyCfg(base.TestCase):
     def test_render_template_member_backup(self):
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    cookie SRV insert indirect nocache\n"
               "    timeout check 31s\n"
@@ -189,7 +185,6 @@ class TestHaproxyCfg(base.TestCase):
             maxconn=constants.HAPROXY_MAX_MAXCONN)
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    cookie SRV insert indirect nocache\n"
               "    timeout check 31s\n"
@@ -226,7 +221,6 @@ class TestHaproxyCfg(base.TestCase):
             maxconn=constants.HAPROXY_MAX_MAXCONN)
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    cookie SRV insert indirect nocache\n"
               "    timeout check 31s\n"
@@ -255,7 +249,6 @@ class TestHaproxyCfg(base.TestCase):
     def test_render_template_member_monitor_addr_port(self):
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    cookie SRV insert indirect nocache\n"
               "    timeout check 31s\n"
@@ -350,7 +343,6 @@ class TestHaproxyCfg(base.TestCase):
     def test_render_template_no_monitor_http(self):
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    cookie SRV insert indirect nocache\n"
               "    fullconn {maxconn}\n"
@@ -371,7 +363,6 @@ class TestHaproxyCfg(base.TestCase):
     def test_render_template_disabled_member(self):
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    cookie SRV insert indirect nocache\n"
               "    fullconn {maxconn}\n"
@@ -393,7 +384,6 @@ class TestHaproxyCfg(base.TestCase):
     def test_render_template_ping_monitor_http(self):
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    cookie SRV insert indirect nocache\n"
               "    timeout check 31s\n"
@@ -476,7 +466,6 @@ class TestHaproxyCfg(base.TestCase):
     def test_render_template_no_persistence_http(self):
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    fullconn {maxconn}\n"
               "    option allbackups\n"
@@ -495,7 +484,6 @@ class TestHaproxyCfg(base.TestCase):
     def test_render_template_sourceip_persistence(self):
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    stick-table type ip size 10k\n"
               "    stick on src\n"
@@ -522,7 +510,6 @@ class TestHaproxyCfg(base.TestCase):
     def test_render_template_appcookie_persistence(self):
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    stick-table type string len 64 size 10k\n"
               "    stick store-response res.cook(JSESSIONID)\n"
@@ -628,7 +615,6 @@ class TestHaproxyCfg(base.TestCase):
             maxconn=constants.HAPROXY_MAX_MAXCONN)
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    cookie SRV insert indirect nocache\n"
               "    timeout check 31s\n"
@@ -645,7 +631,6 @@
               "\n"
               "backend sample_pool_id_2\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    cookie SRV insert indirect nocache\n"
               "    timeout check 31s\n"
@@ -667,7 +652,6 @@ class TestHaproxyCfg(base.TestCase):
     def test_render_template_http_xff(self):
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    cookie SRV insert indirect nocache\n"
               "    timeout check 31s\n"
@@ -696,7 +680,6 @@ class TestHaproxyCfg(base.TestCase):
     def test_render_template_http_xff_xfport(self):
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    cookie SRV insert indirect nocache\n"
               "    timeout check 31s\n"
@@ -727,7 +710,6 @@ class TestHaproxyCfg(base.TestCase):
     def test_render_template_pool_proxy_protocol(self):
         be = ("backend sample_pool_id_1\n"
               "    mode http\n"
-              "    http-reuse safe\n"
               "    balance roundrobin\n"
               "    cookie SRV insert indirect nocache\n"
               "    timeout check 31s\n"
@@ -752,75 +734,84 @@

     def test_transform_session_persistence(self):
         in_persistence = sample_configs.sample_session_persistence_tuple()
-        ret = self.jinja_cfg._transform_session_persistence(in_persistence)
+        ret = self.jinja_cfg._transform_session_persistence(in_persistence, {})
         self.assertEqual(sample_configs.RET_PERSISTENCE, ret)

     def test_transform_health_monitor(self):
         in_persistence = sample_configs.sample_health_monitor_tuple()
-        ret = self.jinja_cfg._transform_health_monitor(in_persistence)
+        ret = self.jinja_cfg._transform_health_monitor(in_persistence, {})
         self.assertEqual(sample_configs.RET_MONITOR_1, ret)

     def test_transform_member(self):
         in_member = sample_configs.sample_member_tuple('sample_member_id_1',
                                                        '10.0.0.99')
-        ret = self.jinja_cfg._transform_member(in_member)
+        ret = self.jinja_cfg._transform_member(in_member, {})
         self.assertEqual(sample_configs.RET_MEMBER_1, ret)

     def test_transform_pool(self):
         in_pool = sample_configs.sample_pool_tuple()
-        ret = self.jinja_cfg._transform_pool(in_pool)
+        ret = self.jinja_cfg._transform_pool(in_pool, {})
         self.assertEqual(sample_configs.RET_POOL_1, ret)

     def test_transform_pool_2(self):
         in_pool = sample_configs.sample_pool_tuple(sample_pool=2)
-        ret = self.jinja_cfg._transform_pool(in_pool)
+        ret = self.jinja_cfg._transform_pool(in_pool, {})
         self.assertEqual(sample_configs.RET_POOL_2, ret)

+    def test_transform_pool_http_reuse(self):
+        in_pool = sample_configs.sample_pool_tuple(sample_pool=2)
+        ret = self.jinja_cfg._transform_pool(
+            in_pool, {constants.HTTP_REUSE: True})
+        import copy
+        expected_config = copy.copy(sample_configs.RET_POOL_2)
+        expected_config[constants.HTTP_REUSE] = True
+        self.assertEqual(expected_config, ret)
+
     def test_transform_listener(self):
         in_listener = sample_configs.sample_listener_tuple()
-        ret = self.jinja_cfg._transform_listener(in_listener, None)
+        ret = self.jinja_cfg._transform_listener(in_listener, None, {})
         self.assertEqual(sample_configs.RET_LISTENER, ret)

     def test_transform_listener_with_l7(self):
         in_listener = sample_configs.sample_listener_tuple(l7=True)
-        ret = self.jinja_cfg._transform_listener(in_listener, None)
+        ret = self.jinja_cfg._transform_listener(in_listener, None, {})
         self.assertEqual(sample_configs.RET_LISTENER_L7, ret)

     def test_transform_loadbalancer(self):
         in_amphora = sample_configs.sample_amphora_tuple()
         in_listener = sample_configs.sample_listener_tuple()
         ret = self.jinja_cfg._transform_loadbalancer(
-            in_amphora, in_listener.load_balancer, in_listener, None)
+            in_amphora, in_listener.load_balancer, in_listener, None, {})
         self.assertEqual(sample_configs.RET_LB, ret)

     def test_transform_amphora(self):
         in_amphora = sample_configs.sample_amphora_tuple()
-        ret = self.jinja_cfg._transform_amphora(in_amphora)
+        ret = self.jinja_cfg._transform_amphora(in_amphora, {})
         self.assertEqual(sample_configs.RET_AMPHORA, ret)

     def test_transform_loadbalancer_with_l7(self):
         in_amphora = sample_configs.sample_amphora_tuple()
         in_listener = sample_configs.sample_listener_tuple(l7=True)
         ret = self.jinja_cfg._transform_loadbalancer(
-            in_amphora, in_listener.load_balancer, in_listener, None)
+            in_amphora, in_listener.load_balancer, in_listener, None, {})
         self.assertEqual(sample_configs.RET_LB_L7, ret)

     def test_transform_l7policy(self):
         in_l7policy = sample_configs.sample_l7policy_tuple(
             'sample_l7policy_id_1')
-        ret = self.jinja_cfg._transform_l7policy(in_l7policy)
+        ret = self.jinja_cfg._transform_l7policy(in_l7policy, {})
         self.assertEqual(sample_configs.RET_L7POLICY_1, ret)

     def test_transform_l7policy_2(self):
         in_l7policy = sample_configs.sample_l7policy_tuple(
             'sample_l7policy_id_2', sample_policy=2)
-        ret = self.jinja_cfg._transform_l7policy(in_l7policy)
+        ret = self.jinja_cfg._transform_l7policy(in_l7policy, {})
         self.assertEqual(sample_configs.RET_L7POLICY_2, ret)

     def test_transform_l7policy_disabled_rule(self):
         in_l7policy = sample_configs.sample_l7policy_tuple(
             'sample_l7policy_id_6', sample_policy=6)
-        ret = self.jinja_cfg._transform_l7policy(in_l7policy)
+        ret = self.jinja_cfg._transform_l7policy(in_l7policy, {})
         self.assertEqual(sample_configs.RET_L7POLICY_6, ret)

     def test_escape_haproxy_config_string(self):
@@ -881,3 +872,61 @@ class TestHaproxyCfg(base.TestCase):
         self.assertEqual(
             sample_configs.sample_base_expected_config(defaults=defaults),
             rendered_obj)
+
+    def test_http_reuse(self):
+        j_cfg = jinja_cfg.JinjaTemplater(
+            base_amp_path='/var/lib/octavia',
+            base_crt_dir='/var/lib/octavia/certs')
+
+        # With http-reuse
+        be = ("backend sample_pool_id_1\n"
+              "    mode http\n"
+              "    http-reuse safe\n"
+              "    balance roundrobin\n"
+              "    cookie SRV insert indirect nocache\n"
+              "    timeout check 31s\n"
+              "    fullconn {maxconn}\n"
+              "    option allbackups\n"
+              "    timeout connect 5000\n"
+              "    timeout server 50000\n"
+              "    server sample_member_id_1 10.0.0.99:82 "
+              "weight 13 check inter 30s fall 3 rise 2 "
+              "cookie sample_member_id_1 send-proxy\n"
+              "    server sample_member_id_2 10.0.0.98:82 "
+              "weight 13 check inter 30s fall 3 rise 2 "
+              "cookie sample_member_id_2 send-proxy\n\n").format(
+            maxconn=constants.HAPROXY_MAX_MAXCONN)
+        rendered_obj = j_cfg.build_config(
+            sample_configs.sample_amphora_tuple(),
+            sample_configs.sample_listener_tuple(be_proto='PROXY'),
+            tls_cert=None,
+            haproxy_versions=("1", "8", "1"))
+        self.assertEqual(
+            sample_configs.sample_base_expected_config(backend=be),
+            rendered_obj)
+
+        # Without http-reuse
+        be = ("backend sample_pool_id_1\n"
+              "    mode http\n"
+              "    balance roundrobin\n"
+              "    cookie SRV insert indirect nocache\n"
+              "    timeout check 31s\n"
+              "    fullconn {maxconn}\n"
+              "    option allbackups\n"
+              "    timeout connect 5000\n"
+              "    timeout server 50000\n"
+              "    server sample_member_id_1 10.0.0.99:82 "
+              "weight 13 check inter 30s fall 3 rise 2 "
+              "cookie sample_member_id_1 send-proxy\n"
+              "    server sample_member_id_2 10.0.0.98:82 "
+              "weight 13 check inter 30s fall 3 rise 2 "
+              "cookie sample_member_id_2 send-proxy\n\n").format(
+            maxconn=constants.HAPROXY_MAX_MAXCONN)
+        rendered_obj = j_cfg.build_config(
+            sample_configs.sample_amphora_tuple(),
+            sample_configs.sample_listener_tuple(be_proto='PROXY'),
+            tls_cert=None,
+            haproxy_versions=("1", "5", "18"))
+        self.assertEqual(
+            sample_configs.sample_base_expected_config(backend=be),
+            rendered_obj)
diff --git a/octavia/tests/unit/common/sample_configs/sample_configs.py b/octavia/tests/unit/common/sample_configs/sample_configs.py
index 41d2477009..202d74855b 100644
--- a/octavia/tests/unit/common/sample_configs/sample_configs.py
+++ b/octavia/tests/unit/common/sample_configs/sample_configs.py
@@ -113,7 +113,8 @@ RET_POOL_1 = {
     'session_persistence': RET_PERSISTENCE,
     'enabled': True,
     'operating_status': 'ACTIVE',
-    'stick_size': '10k'}
+    'stick_size': '10k',
+    constants.HTTP_REUSE: False}

 RET_POOL_2 = {
     'id': 'sample_pool_id_2',
@@ -124,7 +125,8 @@ RET_POOL_2 = {
     'session_persistence': RET_PERSISTENCE,
     'enabled': True,
     'operating_status': 'ACTIVE',
-    'stick_size': '10k'}
+    'stick_size': '10k',
+    constants.HTTP_REUSE: False}

 RET_DEF_TLS_CONT = {'id': 'cont_id_1', 'allencompassingpem': 'imapem',
                     'primary_cn': 'FakeCn'}
@@ -616,12 +618,13 @@ def sample_pool_tuple(proto=None, monitor=True, persistence=True,
                       persistence_timeout=None, persistence_granularity=None,
                       sample_pool=1, monitor_ip_port=False,
                       monitor_proto=None, backup_member=False,
-                      disabled_member=False):
+                      disabled_member=False, has_http_reuse=True):
     proto = 'HTTP' if proto is None else proto
     monitor_proto = proto if monitor_proto is None else monitor_proto
     in_pool = collections.namedtuple(
-        'pool', 'id, protocol, lb_algorithm, members, health_monitor,'
-                'session_persistence, enabled, operating_status')
+        'pool', 'id, protocol, lb_algorithm, members, health_monitor, '
+                'session_persistence, enabled, operating_status, ' +
+        constants.HTTP_REUSE)
     if (proto == constants.PROTOCOL_UDP and
             persistence_type == constants.SESSION_PERSISTENCE_SOURCE_IP):
         kwargs = {'persistence_type': persistence_type,
@@ -656,7 +659,7 @@ def sample_pool_tuple(proto=None, monitor=True, persistence=True,
         health_monitor=mon,
         session_persistence=persis if persistence is True else None,
         enabled=True,
-        operating_status='ACTIVE')
+        operating_status='ACTIVE', has_http_reuse=has_http_reuse)


 def sample_member_tuple(id, ip, enabled=True, operating_status='ACTIVE',
@@ -839,7 +842,6 @@ def sample_base_expected_config(frontend=None, backend=None,
     if backend is None:
         backend = ("backend sample_pool_id_1\n"
                    "    mode http\n"
-                   "    http-reuse safe\n"
                    "    balance roundrobin\n"
                    "    cookie SRV insert indirect nocache\n"
                    "    timeout check 31s\n"
diff --git a/octavia/tests/unit/controller/healthmanager/test_health_manager.py b/octavia/tests/unit/controller/healthmanager/test_health_manager.py
index 12f0551c00..ec73cb5eb5 100644
--- a/octavia/tests/unit/controller/healthmanager/test_health_manager.py
+++ b/octavia/tests/unit/controller/healthmanager/test_health_manager.py
@@ -17,6 +17,7 @@ import threading

 import mock
 from oslo_config import cfg
+from oslo_config import fixture as oslo_fixture
 from oslo_db import exception as db_exc
 from oslo_utils import uuidutils

@@ -51,6 +52,8 @@ class TestHealthManager(base.TestCase):
     @mock.patch('octavia.db.api.get_session')
     def test_health_check_stale_amphora(self, session_mock, get_stale_amp_mock,
                                         failover_mock, db_wait_mock):
+        conf = oslo_fixture.Config(cfg.CONF)
+        conf.config(group="health_manager", heartbeat_timeout=5)
         amphora_health = mock.MagicMock()
         amphora_health.amphora_id = AMPHORA_ID
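End-to-end, the patch means a pool rendered for a 1.6+ amphora carries has_http_reuse=True and the backend macro in macros.j2 emits an extra "http-reuse safe" line, while pools rendered for older amphorae keep the previous output. The snippet below is a simplified, self-contained sketch of that template gate (illustration only; it assumes the jinja2 package is available and collapses the real macro's HTTP/PROXY protocol checks into a plain protocol comparison):

    # Illustration only: a stripped-down stand-in for the macros.j2 condition.
    import jinja2

    TEMPLATE = jinja2.Template(
        "backend {{ pool.id }}\n"
        "    mode {{ pool.protocol }}\n"
        "{% if pool.get('has_http_reuse', False) and pool.protocol == 'http' %}"
        "    http-reuse safe\n"
        "{% endif %}")

    new_amp_pool = {'id': 'sample_pool_id_1', 'protocol': 'http',
                    'has_http_reuse': True}
    old_amp_pool = dict(new_amp_pool, has_http_reuse=False)

    print(TEMPLATE.render(pool=new_amp_pool))  # includes "    http-reuse safe"
    print(TEMPLATE.render(pool=old_amp_pool))  # omits the directive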