Remove support for split listener configuration
Remove support for Amphora API 0.5 or earlier.

During the PTG for the Antelope cycle it was decided to remove the split
listener configuration. [1]

[1]: https://etherpad.opendev.org/p/antelope-ptg-octavia

Change-Id: Ia0c1d4107161fff8d1f3071d57860bfd265f596f
parent b687bd0a4c
commit 7a2df883a0
@@ -125,3 +125,8 @@ class AmpConnectionRetry(AmphoraDriverError):
 
     message = _('Could not connect to amphora, exception caught: '
                 '%(exception)s')
+
+
+class AmpVersionUnsupported(AmphoraDriverError):
+
+    message = _('Amphora version %(version)s is no longer supported.')
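
The new exception follows the pattern of the other driver exceptions in this module: a message template plus keyword interpolation. A runnable sketch with a simplified stand-in for the AmphoraDriverError base class (the stand-in's default message and constructor are illustrative, not the real implementation):

    class AmphoraDriverError(Exception):
        """Simplified stand-in for the real driver exception base class.

        The real class interpolates constructor keyword arguments into the
        message template, which is what makes the %(version)s placeholder work.
        """

        message = 'An amphora driver error occurred.'

        def __init__(self, **kwargs):
            super().__init__(self.message % kwargs if kwargs else self.message)


    class AmpVersionUnsupported(AmphoraDriverError):

        message = 'Amphora version %(version)s is no longer supported.'


    print(AmpVersionUnsupported(version='0.5'))
    # Amphora version 0.5 is no longer supported.
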
@@ -17,6 +17,7 @@ import hashlib
 import os
 import ssl
 import time
+from typing import Optional
 import warnings
 
 from oslo_context import context as oslo_context
@@ -33,20 +34,16 @@ from octavia.amphorae.drivers.keepalived import vrrp_rest_driver
 from octavia.common.config import cfg
 from octavia.common import constants as consts
 import octavia.common.jinja.haproxy.combined_listeners.jinja_cfg as jinja_combo
-import octavia.common.jinja.haproxy.split_listeners.jinja_cfg as jinja_split
 from octavia.common.jinja.lvs import jinja_cfg as jinja_udp_cfg
 from octavia.common.tls_utils import cert_parser
 from octavia.common import utils
 from octavia.db import api as db_apis
+from octavia.db import models as db_models
 from octavia.db import repositories as repo
 from octavia.network import data_models as network_models
 
 
 LOG = logging.getLogger(__name__)
-API_VERSION = consts.API_VERSION
-OCTAVIA_API_CLIENT = (
-    "Octavia HaProxy Rest Client/{version} "
-    "(https://wiki.openstack.org/wiki/Octavia)").format(version=API_VERSION)
 CONF = cfg.CONF
 
 
@@ -58,7 +55,6 @@ class HaproxyAmphoraLoadBalancerDriver(
         super().__init__()
         self.clients = {
             'base': AmphoraAPIClientBase(),
-            '0.5': AmphoraAPIClient0_5(),
             '1.0': AmphoraAPIClient1_0(),
         }
         self.cert_manager = stevedore_driver.DriverManager(
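
With the '0.5' entry dropped, self.clients only maps 'base' and '1.0'; the lookup key is the api_version string discovered on each amphora. A minimal sketch of that mapping (the client classes below are placeholders, not the real implementations):

    class AmphoraAPIClientBase:
        """Placeholder for the shared REST plumbing."""


    class AmphoraAPIClient1_0(AmphoraAPIClientBase):
        """Placeholder for the v1.0 (single haproxy process) client."""


    # One client object per supported amphora API version; the driver indexes
    # the mapping with the version string stored on the amphora object.
    clients = {
        'base': AmphoraAPIClientBase(),
        '1.0': AmphoraAPIClient1_0(),
    }

    # A 1.0 amphora resolves to the remaining client...
    print(type(clients['1.0']).__name__)    # AmphoraAPIClient1_0

    # ...while '0.5' no longer has an entry; the driver now raises
    # AmpVersionUnsupported during version discovery, before any lookup.
    print('0.5' in clients)                 # False
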
@@ -72,11 +68,6 @@ class HaproxyAmphoraLoadBalancerDriver(
             base_crt_dir=CONF.haproxy_amphora.base_cert_dir,
             haproxy_template=CONF.haproxy_amphora.haproxy_template,
             connection_logging=CONF.haproxy_amphora.connection_logging)
-        self.jinja_split = jinja_split.JinjaTemplater(
-            base_amp_path=CONF.haproxy_amphora.base_path,
-            base_crt_dir=CONF.haproxy_amphora.base_cert_dir,
-            haproxy_template=CONF.haproxy_amphora.haproxy_template,
-            connection_logging=CONF.haproxy_amphora.connection_logging)
         self.lvs_jinja = jinja_udp_cfg.LvsJinjaTemplater()
 
     def _get_haproxy_versions(self, amphora, timeout_dict=None):
@@ -113,7 +104,13 @@ class HaproxyAmphoraLoadBalancerDriver(
                 amphora.api_version = '0.5'
         LOG.debug('Amphora %s has API version %s',
                   amphora.id, amphora.api_version)
-        return list(map(int, amphora.api_version.split('.')))
+        api_version = list(map(int, amphora.api_version.split('.')))
+
+        if api_version[0] == 0 and api_version[1] <= 5:  # 0.5 or earlier
+            raise driver_except.AmpVersionUnsupported(
+                version=amphora.api_version)
+
+        return api_version
 
     def update_amphora_listeners(self, loadbalancer, amphora,
                                  timeout_dict=None):
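
For readers tracing the guard: the version string is split into integers, so '0.5' becomes [0, 5] and '1.0' becomes [1, 0], and any 0.x release at or below 0.5 is rejected before the driver touches the amphora's REST API. A standalone sketch of the same check (the exception class here is a stand-in):

    class AmpVersionUnsupported(Exception):
        """Stand-in for the driver exception added in this change."""


    def check_api_version(version_string: str) -> list:
        # '0.5' -> [0, 5], '1.0' -> [1, 0]
        api_version = list(map(int, version_string.split('.')))
        if api_version[0] == 0 and api_version[1] <= 5:  # 0.5 or earlier
            raise AmpVersionUnsupported(
                f'Amphora version {version_string} is no longer supported.')
        return api_version


    print(check_api_version('1.0'))     # [1, 0]
    try:
        check_api_version('0.5')
    except AmpVersionUnsupported as e:
        print(e)                        # Amphora version 0.5 is no longer supported.
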
@@ -141,17 +138,8 @@ class HaproxyAmphoraLoadBalancerDriver(
         # Check which HAProxy version is on the amp
         haproxy_versions = self._get_haproxy_versions(
             amphora, timeout_dict=timeout_dict)
-        # Check which config style to use
-        api_version = self._populate_amphora_api_version(amphora)
-        if api_version[0] == 0 and api_version[1] <= 5:  # 0.5 or earlier
-            split_config = True
-            LOG.warning(
-                'Amphora %s for loadbalancer %s needs upgrade to single '
-                'process mode.', amphora.id, loadbalancer.id)
-        else:
-            split_config = False
-            LOG.debug('Amphora %s for loadbalancer %s is already in single '
-                      'process mode.', amphora.id, loadbalancer.id)
+        # Check if version is supported
+        self._populate_amphora_api_version(amphora)
 
         has_tcp = False
         certs = {}
@@ -168,9 +156,6 @@ class HaproxyAmphoraLoadBalancerDriver(
                     amphora, listener.id, timeout_dict=timeout_dict)
             else:
                 has_tcp = True
-                if split_config:
-                    obj_id = listener.id
-                else:
-                    obj_id = loadbalancer.id
+                obj_id = loadbalancer.id
 
                 try:
@@ -192,20 +177,6 @@ class HaproxyAmphoraLoadBalancerDriver(
                     certs.update(self._process_listener_pool_certs(
                         listener, amphora, obj_id))
 
-                    if split_config:
-                        config = self.jinja_split.build_config(
-                            host_amphora=amphora, listener=listener,
-                            haproxy_versions=haproxy_versions,
-                            client_ca_filename=certs[
-                                listener.client_ca_tls_certificate_id],
-                            client_crl=certs[listener.client_crl_container_id],
-                            pool_tls_certs=certs)
-                        self.clients[amphora.api_version].upload_config(
-                            amphora, listener.id, config,
-                            timeout_dict=timeout_dict)
-                        self.clients[amphora.api_version].reload_listener(
-                            amphora, listener.id, timeout_dict=timeout_dict)
-                    else:
-                        listeners_to_update.append(listener)
+                    listeners_to_update.append(listener)
                 except Exception as e:
                     LOG.exception('Unable to update listener %s due to '
@@ -216,7 +187,7 @@ class HaproxyAmphoraLoadBalancerDriver(
                                          provisioning_status=consts.ERROR,
                                          operating_status=consts.ERROR)
 
-        if has_tcp and not split_config:
+        if has_tcp:
             if listeners_to_update:
                 # Generate HaProxy configuration from listener object
                 amp_details = self.clients[amphora.api_version].get_details(
@@ -272,25 +243,10 @@ class HaproxyAmphoraLoadBalancerDriver(
         else:
             amphorae = [amphora]
 
-        timeout_dict = args[0]
-
         for amp in amphorae:
             if amp.status != consts.DELETED:
-                api_version = self._populate_amphora_api_version(
-                    amp, timeout_dict=timeout_dict)
-                # Check which config style to use
-                if api_version[0] == 0 and api_version[1] <= 5:
-                    # 0.5 or earlier
-                    LOG.warning(
-                        'Amphora %s for loadbalancer %s needs upgrade to '
-                        'single process mode.', amp.id, loadbalancer.id)
-                    for listener in loadbalancer.listeners:
-                        getattr(self.clients[amp.api_version], func_name)(
-                            amp, listener.id, *args)
-                else:
-                    LOG.debug(
-                        'Amphora %s for loadbalancer %s is already in single '
-                        'process mode.', amp.id, loadbalancer.id)
-                    has_tcp = False
-                    for listener in loadbalancer.listeners:
-                        if listener.protocol in consts.LVS_PROTOCOLS:
+                self._populate_amphora_api_version(
+                    amp, timeout_dict=args[0])
+                has_tcp = False
+                for listener in loadbalancer.listeners:
+                    if listener.protocol in consts.LVS_PROTOCOLS:
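
The removed branch drove the per-listener calls through getattr-based dispatch, resolving a client method by name with func_name. That technique is worth a quick illustration; the client and method names below are placeholders:

    class FakeClient:
        """Placeholder client exposing a couple of the actions being driven."""

        def reload_listener(self, amp_id, obj_id, timeout_dict=None):
            print(f'reload {obj_id} on amphora {amp_id}')

        def start_listener(self, amp_id, obj_id, timeout_dict=None):
            print(f'start {obj_id} on amphora {amp_id}')


    clients = {'1.0': FakeClient()}


    def apply_action(func_name, api_version, amp_id, obj_id, timeout_dict=None):
        # getattr resolves the method by name, so one helper can drive
        # start/reload/etc. without a chain of if/elif branches.
        getattr(clients[api_version], func_name)(amp_id, obj_id, timeout_dict)


    apply_action('reload_listener', '1.0', 'amp-1', 'lb-1')
    apply_action('start_listener', '1.0', 'amp-1', 'lb-1')
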
@@ -321,23 +277,8 @@ class HaproxyAmphoraLoadBalancerDriver(
             return
 
         # In case the listener is not UDP or SCTP, things get more complicated.
-        # We need to do this individually for each amphora in case some are
-        # using split config and others are using combined config.
         for amp in loadbalancer.amphorae:
             if amp.status != consts.DELETED:
-                api_version = self._populate_amphora_api_version(amp)
-                # Check which config style to use
-                if api_version[0] == 0 and api_version[1] <= 5:
-                    # 0.5 or earlier
-                    LOG.warning(
-                        'Amphora %s for loadbalancer %s needs upgrade to '
-                        'single process mode.', amp.id, loadbalancer.id)
-                    self.clients[amp.api_version].delete_listener(
-                        amp, listener.id)
-                else:
-                    LOG.debug(
-                        'Amphora %s for loadbalancer %s is already in single '
-                        'process mode.', amp.id, loadbalancer.id)
-                    self._combined_config_delete(amp, listener)
+                self._combined_config_delete(amp, listener)
 
     def _combined_config_delete(self, amphora, listener):
@@ -706,8 +647,10 @@ class AmphoraAPIClientBase(object):
             ip=ip,
             port=CONF.haproxy_amphora.bind_port)
 
-    def request(self, method, amp, path='/', timeout_dict=None,
-                retry_404=True, raise_retry_exception=False, **kwargs):
+    def request(self, method: str, amp: db_models.Amphora, path: str = '/',
+                timeout_dict: Optional[dict] = None,
+                retry_404: bool = True, raise_retry_exception: bool = False,
+                **kwargs):
         cfg_ha_amp = CONF.haproxy_amphora
         if timeout_dict is None:
             timeout_dict = {}
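
The new annotations do not change behaviour: timeout_dict stays Optional and is still normalised to an empty dict inside the method. A minimal sketch of the same pattern outside the driver (the timeout key below is illustrative, not the driver's real one):

    from typing import Optional


    def request(method: str, path: str = '/',
                timeout_dict: Optional[dict] = None,
                retry_404: bool = True, **kwargs) -> dict:
        # Normalise the optional mapping inside the function instead of using
        # a mutable default argument, which would be shared between calls.
        if timeout_dict is None:
            timeout_dict = {}
        connect_timeout = timeout_dict.get('connect_timeout', 10)
        return {'method': method, 'path': path,
                'connect_timeout': connect_timeout}


    print(request('GET', '/info'))
    print(request('PUT', '/config', timeout_dict={'connect_timeout': 2}))
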
@@ -731,7 +674,9 @@ class AmphoraAPIClientBase(object):
         reqargs.update(kwargs)
         headers = reqargs.setdefault('headers', {})
 
-        headers['User-Agent'] = OCTAVIA_API_CLIENT
+        headers['User-Agent'] = (
+            f"Octavia HaProxy Rest Client/{amp.api_version} "
+            f"(https://wiki.openstack.org/wiki/Octavia)")
         self.ssl_adapter.uuid = amp.id
         exception = None
         # Keep retrying
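
Because the module-level OCTAVIA_API_CLIENT constant was pinned to the removed API_VERSION constant, the header is now built per request from the amphora's own api_version. A minimal sketch with a stand-in amphora object:

    from types import SimpleNamespace

    # Stand-in for the amphora DB object; only api_version matters here.
    amp = SimpleNamespace(api_version='1.0')

    headers = {}
    headers['User-Agent'] = (
        f"Octavia HaProxy Rest Client/{amp.api_version} "
        f"(https://wiki.openstack.org/wiki/Octavia)")

    print(headers['User-Agent'])
    # Octavia HaProxy Rest Client/1.0 (https://wiki.openstack.org/wiki/Octavia)
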
@@ -797,123 +742,6 @@ class AmphoraAPIClientBase(object):
         return r.json()
 
 
-class AmphoraAPIClient0_5(AmphoraAPIClientBase):
-    def __init__(self):
-        super().__init__()
-
-        self.start_listener = functools.partial(self._action,
-                                                consts.AMP_ACTION_START)
-        self.reload_listener = functools.partial(self._action,
-                                                 consts.AMP_ACTION_RELOAD)
-
-        self.start_vrrp = functools.partial(self._vrrp_action,
-                                            consts.AMP_ACTION_START)
-        self.stop_vrrp = functools.partial(self._vrrp_action,
-                                           consts.AMP_ACTION_STOP)
-        self.reload_vrrp = functools.partial(self._vrrp_action,
-                                             consts.AMP_ACTION_RELOAD)
-
-    def upload_config(self, amp, listener_id, config, timeout_dict=None):
-        r = self.put(
-            amp,
-            'listeners/{amphora_id}/{listener_id}/haproxy'.format(
-                amphora_id=amp.id, listener_id=listener_id), timeout_dict,
-            data=config)
-        return exc.check_exception(r)
-
-    def _action(self, action, amp, listener_id, timeout_dict=None):
-        r = self.put(amp, 'listeners/{listener_id}/{action}'.format(
-            listener_id=listener_id, action=action), timeout_dict=timeout_dict)
-        return exc.check_exception(r)
-
-    def upload_cert_pem(self, amp, listener_id, pem_filename, pem_file):
-        r = self.put(
-            amp, 'listeners/{listener_id}/certificates/{filename}'.format(
-                listener_id=listener_id, filename=pem_filename),
-            data=pem_file)
-        return exc.check_exception(r)
-
-    def get_cert_md5sum(self, amp, listener_id, pem_filename, ignore=tuple()):
-        r = self.get(
-            amp, 'listeners/{listener_id}/certificates/{filename}'.format(
-                listener_id=listener_id, filename=pem_filename))
-        if exc.check_exception(r, ignore):
-            return r.json().get("md5sum")
-        return None
-
-    def delete_cert_pem(self, amp, listener_id, pem_filename):
-        r = self.delete(
-            amp, 'listeners/{listener_id}/certificates/{filename}'.format(
-                listener_id=listener_id, filename=pem_filename))
-        return exc.check_exception(r, (404,))
-
-    def update_cert_for_rotation(self, amp, pem_file):
-        r = self.put(amp, 'certificate', data=pem_file)
-        return exc.check_exception(r)
-
-    def delete_listener(self, amp, listener_id):
-        r = self.delete(
-            amp, 'listeners/{listener_id}'.format(listener_id=listener_id))
-        return exc.check_exception(r, (404,))
-
-    def get_info(self, amp, raise_retry_exception=False,
-                 timeout_dict=None):
-        r = self.get(amp, "info", raise_retry_exception=raise_retry_exception,
-                     timeout_dict=timeout_dict)
-        if exc.check_exception(r):
-            return r.json()
-        return None
-
-    def get_details(self, amp):
-        r = self.get(amp, "details")
-        if exc.check_exception(r):
-            return r.json()
-        return None
-
-    def get_all_listeners(self, amp):
-        r = self.get(amp, "listeners")
-        if exc.check_exception(r):
-            return r.json()
-        return None
-
-    def plug_network(self, amp, port):
-        r = self.post(amp, 'plug/network',
-                      json=port)
-        return exc.check_exception(r)
-
-    def plug_vip(self, amp, vip, net_info):
-        r = self.post(amp,
-                      'plug/vip/{vip}'.format(vip=vip),
-                      json=net_info)
-        return exc.check_exception(r)
-
-    def upload_vrrp_config(self, amp, config):
-        r = self.put(amp, 'vrrp/upload', data=config)
-        return exc.check_exception(r)
-
-    def _vrrp_action(self, action, amp, timeout_dict=None):
-        r = self.put(amp, 'vrrp/{action}'.format(action=action),
-                     timeout_dict=timeout_dict)
-        return exc.check_exception(r)
-
-    def get_interface(self, amp, ip_addr, timeout_dict=None, log_error=True):
-        r = self.get(amp, 'interface/{ip_addr}'.format(ip_addr=ip_addr),
-                     timeout_dict=timeout_dict)
-        return exc.check_exception(r, log_error=log_error).json()
-
-    def upload_udp_config(self, amp, listener_id, config, timeout_dict=None):
-        r = self.put(
-            amp,
-            'listeners/{amphora_id}/{listener_id}/udp_listener'.format(
-                amphora_id=amp.id, listener_id=listener_id), timeout_dict,
-            data=config)
-        return exc.check_exception(r)
-
-    def update_agent_config(self, amp, agent_config, timeout_dict=None):
-        r = self.put(amp, 'config', timeout_dict, data=agent_config)
-        return exc.check_exception(r)
-
-
 class AmphoraAPIClient1_0(AmphoraAPIClientBase):
     def __init__(self):
         super().__init__()
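
The deleted 0.5 client bound its public actions with functools.partial, so start_listener, reload_listener and the VRRP helpers all funnelled into two generic PUT helpers (_action and _vrrp_action). A compact, runnable sketch of that binding pattern with a dummy transport (class and action names below are illustrative):

    import functools


    class Client:
        """Sketch of the partial-based action binding used by the API clients."""

        def __init__(self):
            # Each public action is the generic helper with the action bound.
            self.start_listener = functools.partial(self._action, 'start')
            self.reload_listener = functools.partial(self._action, 'reload')

        def put(self, path, **kwargs):
            # Dummy transport standing in for the REST call to the agent.
            print('PUT', path, kwargs)

        def _action(self, action, listener_id, timeout_dict=None):
            self.put(f'listeners/{listener_id}/{action}',
                     timeout_dict=timeout_dict)


    c = Client()
    c.start_listener('l-1')      # PUT listeners/l-1/start {'timeout_dict': None}
    c.reload_listener('l-1')     # PUT listeners/l-1/reload {'timeout_dict': None}
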
@@ -19,7 +19,6 @@ from octavia.amphorae.drivers.keepalived.jinja import jinja_cfg
 from octavia.common import constants
 
 LOG = logging.getLogger(__name__)
-API_VERSION = constants.API_VERSION
 
 
 class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin):
@@ -666,8 +666,6 @@ QUOTA_UNLIMITED = -1
 MIN_QUOTA = QUOTA_UNLIMITED
 MAX_QUOTA = 2000000000
 
-API_VERSION = '0.5'
-
 HAPROXY_BASE_PEER_PORT = 1025
 KEEPALIVED_JINJA2_UPSTART = 'keepalived.upstart.j2'
 KEEPALIVED_JINJA2_SYSTEMD = 'keepalived.systemd.j2'
@@ -1,470 +0,0 @@
[Entire file removed: the split-listener Jinja templater imported above as
octavia.common.jinja.haproxy.split_listeners.jinja_cfg. It contained the
JinjaTemplater class (build_config, render_loadbalancer_obj) and its
_transform_loadbalancer / _transform_amphora / _transform_listener /
_transform_pool / _transform_member / _transform_health_monitor /
_transform_l7policy / _transform_l7rule helpers, plus _format_log_string and
_escape_haproxy_config_string. The combined-listener templater (jinja_combo)
is now the only HAProxy configuration path.]
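
Most of what the deleted module did is mirrored by the combined-listener templater that remains; the piece worth noting is its lazily built Jinja environment with a custom hash_amp_id filter. A self-contained sketch of that setup (the filter body here is illustrative, not Octavia's exact implementation):

    import base64
    import hashlib
    import os

    import jinja2

    JINJA_ENV = None


    def base64_sha1_string(string_to_hash):
        # Illustrative filter: HAProxy peer names cannot be raw UUIDs, so the
        # templater hashes the amphora id into a short, safe token.
        digest = hashlib.sha1(string_to_hash.encode('utf-8')).digest()
        return base64.b64encode(digest).decode('utf-8').rstrip('=')


    def get_template(haproxy_template_path):
        global JINJA_ENV
        if not JINJA_ENV:
            template_loader = jinja2.FileSystemLoader(
                searchpath=os.path.dirname(haproxy_template_path))
            JINJA_ENV = jinja2.Environment(
                autoescape=True,
                loader=template_loader,
                trim_blocks=True,
                lstrip_blocks=True)
            JINJA_ENV.filters['hash_amp_id'] = base64_sha1_string
        return JINJA_ENV.get_template(
            os.path.basename(haproxy_template_path))


    print(base64_sha1_string('amp-1234'))
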
@@ -1,50 +0,0 @@
[Entire file removed: the split-listener base HAProxy Jinja template (the
global and defaults sections, the external-check detection loop, and the
peers/proxies blocks).]
@@ -1,40 +0,0 @@
[Entire file removed: the split-listener haproxy.cfg.j2 template, which
extended the base template and rendered one frontend plus its backends per
listener via the peers_macro, frontend_macro and backend_macro imports.]
@@ -1,373 +0,0 @@
[Entire file removed: the split-listener macros template defining peers_macro,
bind_macro, the l7rule and l7policy macros, frontend_macro, member_macro and
backend_macro used by the template above.]
|
||||||
{% endif %}
|
|
||||||
option allbackups
|
|
||||||
timeout connect {{ listener.timeout_member_connect }}
|
|
||||||
timeout server {{ listener.timeout_member_data }}
|
|
||||||
{% for member in pool.members %}
|
|
||||||
{{- member_macro(constants, pool, member) -}}
|
|
||||||
{% endfor %}
|
|
||||||
{% endmacro %}
|
|
@ -13,6 +13,7 @@
 # under the License.
 from unittest import mock
 
+from octavia.amphorae.driver_exceptions.exceptions import AmpVersionUnsupported
 from octavia.amphorae.drivers.haproxy import exceptions as exc
 from octavia.amphorae.drivers.haproxy import rest_api_driver
 import octavia.tests.unit.base as base
@ -81,3 +82,11 @@ class TestHAProxyAmphoraDriver(base.TestCase):
         mock_api_version.assert_called_once_with(amphora_mock, None)
         client_mock.get_interface.assert_called_once_with(
             amphora_mock, IP_ADDRESS, None, log_error=False)
+
+    def test_unsupported_api_version(self):
+        mock_amp = mock.MagicMock()
+        mock_amp.api_version = "0.5"
+
+        self.assertRaises(AmpVersionUnsupported,
+                          self.driver._populate_amphora_api_version,
+                          mock_amp)
File diff suppressed because it is too large
@ -934,7 +934,7 @@ class TestAmphoraAPIClientTest(base.TestCase):
     @requests_mock.mock()
     def test_get_info(self, m):
         info = {"hostname": "some_hostname", "version": "some_version",
-                "api_version": "0.5", "uuid": FAKE_UUID_1}
+                "api_version": "1.0", "uuid": FAKE_UUID_1}
         m.get("{base}/info".format(base=self.base_url_ver),
               json=info)
         information = self.driver.get_info(self.amp)
@ -943,7 +943,7 @@ class TestAmphoraAPIClientTest(base.TestCase):
     @requests_mock.mock()
     def test_get_info_with_timeout_dict(self, m):
         info = {"hostname": "some_hostname", "version": "some_version",
-                "api_version": "0.5", "uuid": FAKE_UUID_1}
+                "api_version": "1.0", "uuid": FAKE_UUID_1}
         m.get("{base}/info".format(base=self.base_url_ver),
               json=info)
         timeout_dict = {
@ -983,7 +983,7 @@ class TestAmphoraAPIClientTest(base.TestCase):
     @requests_mock.mock()
     def test_get_details(self, m):
         details = {"hostname": "some_hostname", "version": "some_version",
-                   "api_version": "0.5", "uuid": FAKE_UUID_1,
+                   "api_version": "1.0", "uuid": FAKE_UUID_1,
                    "network_tx": "some_tx", "network_rx": "some_rx",
                    "active": True, "haproxy_count": 10}
         m.get("{base}/details".format(base=self.base_url_ver),
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -0,0 +1,6 @@
+---
+deprecations:
+  - |
+    Amphora load balancers now support single process mode only. Split
+    listener configuration, which was used up to API version 0.5, has been
+    removed from the codebase.
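
The new TestHAProxyAmphoraDriver case above is the behavioural contract for this removal: an amphora that still reports API version 0.5 must be rejected instead of being served by the deleted split listener code path. The snippet below is a hypothetical, simplified sketch of that guard; the helper name reject_unsupported_amphora and the bare major-version comparison are assumptions for illustration, not the driver's actual _populate_amphora_api_version implementation.

# Hypothetical sketch only; not the driver's actual implementation.
from octavia.amphorae.driver_exceptions.exceptions import AmpVersionUnsupported


def reject_unsupported_amphora(amphora):
    """Raise AmpVersionUnsupported for amphorae on the removed 0.5 API."""
    version = getattr(amphora, 'api_version', None) or '0.5'
    major = int(version.split('.')[0])
    if major < 1:
        # Only API 1.0+ amphorae (combined listener configuration)
        # remain supported after this change.
        raise AmpVersionUnsupported(version=version)
    return version

Fed the mock_amp from the new test (api_version = "0.5"), this raises AmpVersionUnsupported; a 1.0 amphora passes through unchanged.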