Fix IPv6 in Active/Standby topology
Load balancers with IPv6 VIP addresses would fail to create due to a duplicate address detection issue. The keepalived process would also crash with a segfault due to a known bug[1]. This patch resolves both issues and allows load balancers with IPv6 VIP addresses to be created in active/standby topology. [1] https://github.com/acassen/keepalived/issues/457 Story: 2003451 Task: 24657 Co-Authored-By: Michael Johnson <johnsomor@gmail.com> Change-Id: I15a4be05740e2657f998902d468e57763c3ed52e
This commit is contained in:
parent
1e4940f37e
commit
fbb9397979
@ -23,6 +23,7 @@ sysctl-write-value net.ipv4.netfilter.ip_conntrack_tcp_timeout_fin_wait 5 || tru
|
||||
|
||||
sysctl-write-value net.ipv4.tcp_fin_timeout 5
|
||||
sysctl-write-value net.ipv4.ip_nonlocal_bind 1
|
||||
sysctl-write-value net.ipv6.ip_nonlocal_bind 1
|
||||
sysctl-write-value net.ipv4.tcp_rmem "16384 65536 524288"
|
||||
sysctl-write-value net.ipv4.tcp_wmem "16384 349520 699040"
|
||||
sysctl-write-value net.ipv4.ip_local_port_range "1025 65534"
|
||||
|
11
elements/keepalived-octavia/post-install.d/11-ip6-tables
Executable file
11
elements/keepalived-octavia/post-install.d/11-ip6-tables
Executable file
@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
# keepalived older than 1.3 will segfault when using IPv6 VIPs if the
|
||||
# ip6_tables module is not loaded. Make sure it is loaded on releases we
|
||||
# know have the older version.
|
||||
|
||||
set -eu
|
||||
set -o xtrace
|
||||
|
||||
if [ "$DISTRO_NAME" == "ubuntu" ] && { [ "$DIB_RELEASE" == "trusty" ] || [ "$DIB_RELEASE" == "xenial" ]; }; then
|
||||
echo ip6_tables > /etc/modules-load.d/ip6_tables.conf
|
||||
fi
|
@ -57,4 +57,5 @@ class AgentJinjaTemplater(object):
|
||||
'use_upstart': CONF.haproxy_amphora.use_upstart,
|
||||
'respawn_count': CONF.haproxy_amphora.respawn_count,
|
||||
'respawn_interval': CONF.haproxy_amphora.respawn_interval,
|
||||
'amphora_udp_driver': CONF.amphora_agent.amphora_udp_driver})
|
||||
'amphora_udp_driver': CONF.amphora_agent.amphora_udp_driver,
|
||||
'topology': CONF.controller_worker.loadbalancer_topology})
|
||||
|
@ -64,6 +64,11 @@ class Keepalived(object):
|
||||
if init_system == consts.INIT_SYSTEMD:
|
||||
template = SYSTEMD_TEMPLATE
|
||||
init_enable_cmd = "systemctl enable octavia-keepalived"
|
||||
|
||||
# Render and install the network namespace systemd service
|
||||
util.install_netns_systemd_service()
|
||||
util.run_systemctl_command(
|
||||
consts.ENABLE, consts.AMP_NETNS_SVC_PREFIX)
|
||||
elif init_system == consts.INIT_UPSTART:
|
||||
template = UPSTART_TEMPLATE
|
||||
elif init_system == consts.INIT_SYSVINIT:
|
||||
@ -86,7 +91,8 @@ class Keepalived(object):
|
||||
keepalived_cmd=consts.KEEPALIVED_CMD,
|
||||
keepalived_cfg=util.keepalived_cfg_path(),
|
||||
keepalived_log=util.keepalived_log_path(),
|
||||
amphora_nsname=consts.AMPHORA_NAMESPACE
|
||||
amphora_nsname=consts.AMPHORA_NAMESPACE,
|
||||
amphora_netns=consts.AMP_NETNS_SVC_PREFIX
|
||||
)
|
||||
text_file.write(text)
|
||||
|
||||
|
@ -43,7 +43,6 @@ CONF = cfg.CONF
|
||||
UPSTART_CONF = 'upstart.conf.j2'
|
||||
SYSVINIT_CONF = 'sysvinit.conf.j2'
|
||||
SYSTEMD_CONF = 'systemd.conf.j2'
|
||||
consts.AMP_NETNS_SVC_PREFIX = 'amphora-netns'
|
||||
|
||||
JINJA_ENV = jinja2.Environment(
|
||||
autoescape=True,
|
||||
|
@ -113,6 +113,7 @@ class BaseOS(object):
|
||||
with os.fdopen(os.open(interface_file_path, flags, mode),
|
||||
'w') as text_file:
|
||||
text = template_vip.render(
|
||||
consts=consts,
|
||||
interface=primary_interface,
|
||||
vip=vip,
|
||||
vip_ipv6=ip.version == 6,
|
||||
@ -125,6 +126,7 @@ class BaseOS(object):
|
||||
vrrp_ip=vrrp_ip,
|
||||
vrrp_ipv6=vrrp_version == 6,
|
||||
host_routes=render_host_routes,
|
||||
topology=CONF.controller_worker.loadbalancer_topology,
|
||||
)
|
||||
text_file.write(text)
|
||||
|
||||
@ -225,8 +227,10 @@ class BaseOS(object):
|
||||
|
||||
def bring_interfaces_up(self, ip, primary_interface, secondary_interface):
|
||||
self._bring_if_down(primary_interface)
|
||||
if secondary_interface:
|
||||
self._bring_if_down(secondary_interface)
|
||||
self._bring_if_up(primary_interface, 'VIP')
|
||||
if secondary_interface:
|
||||
self._bring_if_up(secondary_interface, 'VIP')
|
||||
|
||||
def has_ifup_all(self):
|
||||
@ -396,7 +400,10 @@ class RH(BaseOS):
|
||||
netmask, gateway, mtu, vrrp_ip, vrrp_version, render_host_routes,
|
||||
template_vip)
|
||||
|
||||
if ip.version == 4:
|
||||
# keepalived will handle the VIP if we are on active/standby
|
||||
if (ip.version == 4 and
|
||||
CONF.controller_worker.loadbalancer_topology ==
|
||||
consts.TOPOLOGY_SINGLE):
|
||||
# Create an IPv4 alias interface, needed in RH based flavors
|
||||
alias_interface_file_path = self.get_alias_network_interface_file(
|
||||
primary_interface)
|
||||
@ -414,6 +421,9 @@ class RH(BaseOS):
|
||||
routes_interface_file_path, primary_interface,
|
||||
render_host_routes, template_routes, gateway, vip, netmask)
|
||||
|
||||
# keepalived will handle the rule(s) if we are on active/standby
|
||||
if (CONF.controller_worker.loadbalancer_topology ==
|
||||
consts.TOPOLOGY_SINGLE):
|
||||
route_rules_interface_file_path = (
|
||||
self.get_route_rules_interface_file(primary_interface))
|
||||
template_rules = j2_env.get_template(self.RULE_ETH_X_CONF)
|
||||
@ -441,11 +451,13 @@ class RH(BaseOS):
|
||||
with os.fdopen(os.open(interface_file_path, flags, mode),
|
||||
'w') as text_file:
|
||||
text = template_routes.render(
|
||||
consts=consts,
|
||||
interface=interface,
|
||||
host_routes=host_routes,
|
||||
gateway=gateway,
|
||||
network=utils.ip_netmask_to_cidr(vip, netmask),
|
||||
vip=vip,
|
||||
topology=CONF.controller_worker.loadbalancer_topology,
|
||||
)
|
||||
text_file.write(text)
|
||||
|
||||
|
@ -19,7 +19,6 @@ import socket
|
||||
import stat
|
||||
import subprocess
|
||||
|
||||
import jinja2
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
import pyroute2
|
||||
@ -38,11 +37,6 @@ ETH_X_PORT_CONF = 'plug_port_ethX.conf.j2'
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
j2_env = jinja2.Environment(autoescape=True, loader=jinja2.FileSystemLoader(
|
||||
os.path.dirname(os.path.realpath(__file__)) + consts.AGENT_API_TEMPLATES))
|
||||
template_port = j2_env.get_template(ETH_X_PORT_CONF)
|
||||
template_vip = j2_env.get_template(ETH_X_VIP_CONF)
|
||||
|
||||
|
||||
class Plug(object):
|
||||
def __init__(self, osutils):
|
||||
@ -153,6 +147,10 @@ class Plug(object):
|
||||
ipr.link('set', index=idx, net_ns_fd=consts.AMPHORA_NAMESPACE,
|
||||
IFLA_IFNAME=primary_interface)
|
||||
|
||||
# In an ha amphora, keepalived should bring the VIP interface up
|
||||
if (CONF.controller_worker.loadbalancer_topology ==
|
||||
consts.TOPOLOGY_ACTIVE_STANDBY):
|
||||
secondary_interface = None
|
||||
# bring interfaces up
|
||||
self._osutils.bring_interfaces_up(
|
||||
ip, primary_interface, secondary_interface)
|
||||
|
@ -15,7 +15,11 @@
|
||||
# under the License.
|
||||
#}
|
||||
# Generated by Octavia agent
|
||||
{%- if topology == consts.TOPOLOGY_SINGLE %}
|
||||
auto {{ interface }} {{ interface }}:0
|
||||
{%- else %}
|
||||
auto {{ interface }}
|
||||
{%- endif %}
|
||||
{%- if vrrp_ip %}
|
||||
iface {{ interface }} inet{{ '6' if vrrp_ipv6 }} static
|
||||
address {{ vrrp_ip }}
|
||||
@ -42,22 +46,37 @@ down route del -net {{ hr.network }} gw {{ hr.gw }} dev {{ interface }}
|
||||
iface {{ interface }} inet{{ '6' if vip_ipv6 }} {{ 'auto' if vip_ipv6 else 'dhcp' }}
|
||||
{%- endif %}
|
||||
|
||||
{%- if topology == consts.TOPOLOGY_SINGLE %}
|
||||
iface {{ interface }}:0 inet{{ '6' if vip_ipv6 }} static
|
||||
address {{ vip }}
|
||||
broadcast {{ broadcast }}
|
||||
netmask {{ netmask }}
|
||||
{%- endif %}
|
||||
|
||||
# Add a source routing table to allow members to access the VIP
|
||||
{%- if gateway %}
|
||||
post-up /sbin/ip {{ '-6 ' if vip_ipv6 }}route add {{ network }} dev {{ interface }} src {{ vip }} scope link table 1
|
||||
|
||||
post-up /sbin/ip {{ '-6 ' if vip_ipv6 }}route add default via {{ gateway }} dev {{ interface }} onlink table 1
|
||||
post-down /sbin/ip {{ '-6 ' if vip_ipv6 }}route del default via {{ gateway }} dev {{ interface }} onlink table 1
|
||||
|
||||
{# Keepalived will insert and remove this route in active/standby #}
|
||||
{%- if topology == consts.TOPOLOGY_SINGLE %}
|
||||
post-up /sbin/ip {{ '-6 ' if vip_ipv6 }}route add {{ network }} dev {{ interface }} src {{ vip }} scope link table 1
|
||||
post-down /sbin/ip {{ '-6 ' if vip_ipv6 }}route del {{ network }} dev {{ interface }} src {{ vip }} scope link table 1
|
||||
{%- endif %}
|
||||
|
||||
{%- endif %}
|
||||
|
||||
{%- for hr in host_routes %}
|
||||
post-up /sbin/ip {{ '-6 ' if vip_ipv6 }}route add {{ hr.network }} via {{ hr.gw }} dev {{ interface }} onlink table 1
|
||||
post-down /sbin/ip {{ '-6 ' if vip_ipv6 }}route del {{ hr.network }} via {{ hr.gw }} dev {{ interface }} onlink table 1
|
||||
{%- endfor %}
|
||||
post-up /sbin/ip {{ '-6 ' if vip_ipv6 }}rule add from {{ vip }}/32 table 1 priority 100
|
||||
post-down /sbin/ip {{ '-6 ' if vip_ipv6 }}rule del from {{ vip }}/32 table 1 priority 100
|
||||
|
||||
{# Keepalived will insert and remove this rule in active/standby #}
|
||||
{%- if topology == consts.TOPOLOGY_SINGLE %}
|
||||
post-up /sbin/ip {{ '-6 ' if vip_ipv6 }}rule add from {{ vip }}/{{ '128' if vip_ipv6 else '32' }} table 1 priority 100
|
||||
post-down /sbin/ip {{ '-6 ' if vip_ipv6 }}rule del from {{ vip }}/{{ '128' if vip_ipv6 else '32' }} table 1 priority 100
|
||||
{%- endif %}
|
||||
|
||||
post-up /sbin/ip{{ '6' if vip_ipv6 }}tables -t nat -A POSTROUTING -p udp -o {{ interface }} -j MASQUERADE
|
||||
post-down /sbin/ip{{ '6' if vip_ipv6 }}tables -t nat -D POSTROUTING -p udp -o {{ interface }} -j MASQUERADE
|
||||
|
@ -19,7 +19,9 @@
|
||||
{%- endfor %}
|
||||
# Add a source routing table to allow members to access the VIP
|
||||
{%- if gateway %}
|
||||
{%- if topology == consts.TOPOLOGY_SINGLE %}
|
||||
{{ network }} dev {{ interface }} src {{ vip }} scope link table 1
|
||||
{%- endif %}
|
||||
default table 1 via {{ gateway }} dev {{ interface }}
|
||||
{%- endif %}
|
||||
{%- for hr in host_routes %}
|
||||
|
@ -42,3 +42,6 @@ agent_server_network_file = {{ agent_server_network_file }}
|
||||
agent_request_read_timeout = {{ agent_request_read_timeout }}
|
||||
amphora_id = {{ amphora_id }}
|
||||
amphora_udp_driver = {{ amphora_udp_driver }}
|
||||
|
||||
[controller_worker]
|
||||
loadbalancer_topology = {{ topology }}
|
||||
|
@ -12,6 +12,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ipaddress
|
||||
import os
|
||||
|
||||
import jinja2
|
||||
@ -53,11 +54,12 @@ class KeepalivedJinjaTemplater(object):
|
||||
lstrip_blocks=True)
|
||||
return self._jinja_env.get_template(os.path.basename(template_file))
|
||||
|
||||
def build_keepalived_config(self, loadbalancer, amphora):
|
||||
def build_keepalived_config(self, loadbalancer, amphora, vip_cidr):
|
||||
"""Renders the loadblanacer keepalived configuration for Active/Standby
|
||||
|
||||
:param loadbalancer: A loadbalancer object
|
||||
:param amphora: An amphora object
|
||||
:param vip_cidr: The VIP subnet cidr
|
||||
"""
|
||||
# Note on keepalived configuration: The current base configuration
|
||||
# enforced Master election whenever a high priority VRRP instance
|
||||
@ -67,6 +69,22 @@ class KeepalivedJinjaTemplater(object):
|
||||
# several backend services. To disable the fallback behavior, we need
|
||||
# to add the "nopreempt" flag in the backup instance section.
|
||||
peers_ips = []
|
||||
|
||||
# Validate the VIP address and see if it is IPv6
|
||||
vip = loadbalancer.vip.ip_address
|
||||
vip_addr = ipaddress.ip_address(
|
||||
vip if isinstance(vip, six.text_type) else six.u(vip))
|
||||
vip_ipv6 = True if vip_addr.version == 6 else False
|
||||
|
||||
# Normalize and validate the VIP subnet CIDR
|
||||
vip_network_cidr = None
|
||||
vip_cidr = (vip_cidr if isinstance(vip_cidr, six.text_type) else
|
||||
six.u(vip_cidr))
|
||||
if vip_ipv6:
|
||||
vip_network_cidr = ipaddress.IPv6Network(vip_cidr).with_prefixlen
|
||||
else:
|
||||
vip_network_cidr = ipaddress.IPv4Network(vip_cidr).with_prefixlen
|
||||
|
||||
for amp in six.moves.filter(
|
||||
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
|
||||
loadbalancer.amphorae):
|
||||
@ -86,12 +104,14 @@ class KeepalivedJinjaTemplater(object):
|
||||
'vrrp_auth_pass': loadbalancer.vrrp_group.vrrp_auth_pass,
|
||||
'amp_vrrp_ip': amphora.vrrp_ip,
|
||||
'peers_vrrp_ips': peers_ips,
|
||||
'vip_ip_address': loadbalancer.vip.ip_address,
|
||||
'vip_ip_address': vip,
|
||||
'advert_int': loadbalancer.vrrp_group.advert_int,
|
||||
'check_script_path': util.keepalived_check_script_path(),
|
||||
'vrrp_check_interval':
|
||||
CONF.keepalived_vrrp.vrrp_check_interval,
|
||||
'vrrp_fail_count': CONF.keepalived_vrrp.vrrp_fail_count,
|
||||
'vrrp_success_count':
|
||||
CONF.keepalived_vrrp.vrrp_success_count},
|
||||
CONF.keepalived_vrrp.vrrp_success_count,
|
||||
'vip_network_cidr': vip_network_cidr,
|
||||
'vip_ipv6': vip_ipv6},
|
||||
constants=constants)
|
||||
|
@ -26,6 +26,7 @@ vrrp_instance {{ vrrp_group_name }} {
|
||||
virtual_router_id {{ amp_vrrp_id }}
|
||||
priority {{ amp_priority }}
|
||||
nopreempt
|
||||
accept
|
||||
garp_master_refresh {{ vrrp_garp_refresh }}
|
||||
garp_master_refresh_repeat {{ vrrp_garp_refresh_repeat }}
|
||||
advert_int {{ advert_int }}
|
||||
@ -44,8 +45,16 @@ vrrp_instance {{ vrrp_group_name }} {
|
||||
virtual_ipaddress {
|
||||
{{ vip_ip_address }}
|
||||
}
|
||||
|
||||
virtual_routes {
|
||||
{{ vip_network_cidr }} dev {{ amp_intf }} src {{ vip_ip_address }} scope link table 1
|
||||
}
|
||||
|
||||
virtual_rules {
|
||||
from {{ vip_ip_address }}/{{ '128' if vip_ipv6 else '32' }} table 1 priority 100
|
||||
}
|
||||
|
||||
track_script {
|
||||
check_script
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -30,10 +30,11 @@ class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin):
|
||||
# The Mixed class must define a self.client object for the
|
||||
# AmphoraApiClient
|
||||
|
||||
def update_vrrp_conf(self, loadbalancer):
|
||||
def update_vrrp_conf(self, loadbalancer, amphorae_network_config):
|
||||
"""Update amphorae of the loadbalancer with a new VRRP configuration
|
||||
|
||||
:param loadbalancer: loadbalancer object
|
||||
:param amphorae_network_config: amphorae network configurations
|
||||
"""
|
||||
templater = jinja_cfg.KeepalivedJinjaTemplater()
|
||||
|
||||
@ -43,8 +44,13 @@ class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin):
|
||||
for amp in six.moves.filter(
|
||||
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
|
||||
loadbalancer.amphorae):
|
||||
|
||||
# Get the VIP subnet prefix for the amphora
|
||||
vip_cidr = amphorae_network_config[amp.id].vip_subnet.cidr
|
||||
|
||||
# Generate Keepalived configuration from loadbalancer object
|
||||
config = templater.build_keepalived_config(loadbalancer, amp)
|
||||
config = templater.build_keepalived_config(
|
||||
loadbalancer, amp, vip_cidr)
|
||||
self.client.upload_vrrp_config(amp, config)
|
||||
|
||||
def stop_vrrp_service(self, loadbalancer):
|
||||
|
@ -483,7 +483,8 @@ class AmphoraFlows(object):
|
||||
provides=constants.LOADBALANCER))
|
||||
vrrp_subflow.add(amphora_driver_tasks.AmphoraVRRPUpdate(
|
||||
name=sf_name + '-' + constants.AMP_VRRP_UPDATE,
|
||||
requires=constants.LOADBALANCER))
|
||||
requires=(constants.LOADBALANCER,
|
||||
constants.AMPHORAE_NETWORK_CONFIG)))
|
||||
vrrp_subflow.add(amphora_driver_tasks.AmphoraVRRPStart(
|
||||
name=sf_name + '-' + constants.AMP_VRRP_START,
|
||||
requires=constants.LOADBALANCER))
|
||||
|
@ -330,9 +330,10 @@ class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
|
||||
class AmphoraVRRPUpdate(BaseAmphoraTask):
|
||||
"""Task to update the VRRP configuration of the loadbalancer amphorae."""
|
||||
|
||||
def execute(self, loadbalancer):
|
||||
def execute(self, loadbalancer, amphorae_network_config):
|
||||
"""Execute update_vrrp_conf."""
|
||||
self.amphora_driver.update_vrrp_conf(loadbalancer)
|
||||
self.amphora_driver.update_vrrp_conf(loadbalancer,
|
||||
amphorae_network_config)
|
||||
LOG.debug("Uploaded VRRP configuration of loadbalancer %s amphorae",
|
||||
loadbalancer.id)
|
||||
|
||||
|
@ -45,6 +45,10 @@ class TestServerTestCase(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestServerTestCase, self).setUp()
|
||||
self.conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF))
|
||||
self.conf.config(group="haproxy_amphora", base_path='/var/lib/octavia')
|
||||
self.conf.config(group="controller_worker",
|
||||
loadbalancer_topology=consts.TOPOLOGY_SINGLE)
|
||||
with mock.patch('distro.id',
|
||||
return_value='ubuntu'):
|
||||
self.ubuntu_test_server = server.Server()
|
||||
@ -55,9 +59,6 @@ class TestServerTestCase(base.TestCase):
|
||||
self.centos_test_server = server.Server()
|
||||
self.centos_app = self.centos_test_server.app.test_client()
|
||||
|
||||
self.conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF))
|
||||
self.conf.config(group="haproxy_amphora", base_path='/var/lib/octavia')
|
||||
|
||||
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
|
||||
'get_os_init_system', return_value=consts.INIT_SYSTEMD)
|
||||
def test_ubuntu_haproxy_systemd(self, mock_init_system):
|
||||
@ -1741,19 +1742,18 @@ class TestServerTestCase(base.TestCase):
|
||||
'dev {netns_int}\n'
|
||||
'down route del -host 203.0.115.1/32 gw 203.0.113.5 '
|
||||
'dev {netns_int}\n'
|
||||
'\n'
|
||||
'iface {netns_int}:0 inet static\n'
|
||||
'address 203.0.113.2\n'
|
||||
'broadcast 203.0.113.255\n'
|
||||
'netmask 255.255.255.0\n'
|
||||
'netmask 255.255.255.0\n\n'
|
||||
'# Add a source routing table to allow members to '
|
||||
'access the VIP\n'
|
||||
'post-up /sbin/ip route add 203.0.113.0/24 '
|
||||
'dev eth1 src 203.0.113.2 scope link table 1\n'
|
||||
'access the VIP\n\n'
|
||||
'post-up /sbin/ip route add default via 203.0.113.1 '
|
||||
'dev eth1 onlink table 1\n'
|
||||
'post-down /sbin/ip route del default via 203.0.113.1 '
|
||||
'dev eth1 onlink table 1\n'
|
||||
'dev eth1 onlink table 1\n\n\n'
|
||||
'post-up /sbin/ip route add 203.0.113.0/24 '
|
||||
'dev eth1 src 203.0.113.2 scope link table 1\n'
|
||||
'post-down /sbin/ip route del 203.0.113.0/24 '
|
||||
'dev eth1 src 203.0.113.2 scope link table 1\n'
|
||||
'post-up /sbin/ip route add 203.0.114.0/24 '
|
||||
@ -1763,11 +1763,11 @@ class TestServerTestCase(base.TestCase):
|
||||
'post-up /sbin/ip route add 203.0.115.1/32 '
|
||||
'via 203.0.113.5 dev eth1 onlink table 1\n'
|
||||
'post-down /sbin/ip route del 203.0.115.1/32 '
|
||||
'via 203.0.113.5 dev eth1 onlink table 1\n'
|
||||
'via 203.0.113.5 dev eth1 onlink table 1\n\n\n'
|
||||
'post-up /sbin/ip rule add from 203.0.113.2/32 table 1 '
|
||||
'priority 100\n'
|
||||
'post-down /sbin/ip rule del from 203.0.113.2/32 table 1 '
|
||||
'priority 100\n'
|
||||
'priority 100\n\n'
|
||||
'post-up /sbin/iptables -t nat -A POSTROUTING -p udp '
|
||||
'-o eth1 -j MASQUERADE\n'
|
||||
'post-down /sbin/iptables -t nat -D POSTROUTING -p udp '
|
||||
@ -1860,25 +1860,25 @@ class TestServerTestCase(base.TestCase):
|
||||
handle.write.assert_any_call(
|
||||
'\n# Generated by Octavia agent\n'
|
||||
'auto {netns_int} {netns_int}:0\n\n'
|
||||
'iface {netns_int} inet dhcp\n\n'
|
||||
'iface {netns_int} inet dhcp\n'
|
||||
'iface {netns_int}:0 inet static\n'
|
||||
'address 203.0.113.2\n'
|
||||
'broadcast 203.0.113.255\n'
|
||||
'netmask 255.255.255.0\n'
|
||||
'netmask 255.255.255.0\n\n'
|
||||
'# Add a source routing table to allow members to '
|
||||
'access the VIP\n'
|
||||
'post-up /sbin/ip route add 203.0.113.0/24 '
|
||||
'dev eth1 src 203.0.113.2 scope link table 1\n'
|
||||
'access the VIP\n\n'
|
||||
'post-up /sbin/ip route add default via 203.0.113.1 '
|
||||
'dev eth1 onlink table 1\n'
|
||||
'post-down /sbin/ip route del default via 203.0.113.1 '
|
||||
'dev eth1 onlink table 1\n'
|
||||
'post-down /sbin/ip route del 203.0.113.0/24 '
|
||||
'dev eth1 onlink table 1\n\n\n'
|
||||
'post-up /sbin/ip route add 203.0.113.0/24 '
|
||||
'dev eth1 src 203.0.113.2 scope link table 1\n'
|
||||
'post-down /sbin/ip route del 203.0.113.0/24 '
|
||||
'dev eth1 src 203.0.113.2 scope link table 1\n\n\n'
|
||||
'post-up /sbin/ip rule add from 203.0.113.2/32 table 1 '
|
||||
'priority 100\n'
|
||||
'post-down /sbin/ip rule del from 203.0.113.2/32 table 1 '
|
||||
'priority 100\n'
|
||||
'priority 100\n\n'
|
||||
'post-up /sbin/iptables -t nat -A POSTROUTING -p udp '
|
||||
'-o eth1 -j MASQUERADE\n'
|
||||
'post-down /sbin/iptables -t nat -D POSTROUTING -p udp '
|
||||
@ -2103,20 +2103,19 @@ class TestServerTestCase(base.TestCase):
|
||||
'dev {netns_int}\n'
|
||||
'down route del -host 2001:db9::1/128 gw 2001:db8::5 '
|
||||
'dev {netns_int}\n'
|
||||
'\n'
|
||||
'iface {netns_int}:0 inet6 static\n'
|
||||
'address 2001:0db8:0000:0000:0000:0000:0000:0002\n'
|
||||
'broadcast 2001:0db8:ffff:ffff:ffff:ffff:ffff:ffff\n'
|
||||
'netmask 32\n'
|
||||
'netmask 32\n\n'
|
||||
'# Add a source routing table to allow members to access '
|
||||
'the VIP\n'
|
||||
'post-up /sbin/ip -6 route add 2001:db8::/32 '
|
||||
'dev eth1 src 2001:0db8:0000:0000:0000:0000:0000:0002 '
|
||||
'scope link table 1\n'
|
||||
'the VIP\n\n'
|
||||
'post-up /sbin/ip -6 route add default via 2001:db8::1 '
|
||||
'dev eth1 onlink table 1\n'
|
||||
'post-down /sbin/ip -6 route del default via 2001:db8::1 '
|
||||
'dev eth1 onlink table 1\n'
|
||||
'dev eth1 onlink table 1\n\n\n'
|
||||
'post-up /sbin/ip -6 route add 2001:db8::/32 '
|
||||
'dev eth1 src 2001:0db8:0000:0000:0000:0000:0000:0002 '
|
||||
'scope link table 1\n'
|
||||
'post-down /sbin/ip -6 route del 2001:db8::/32 '
|
||||
'dev eth1 src 2001:0db8:0000:0000:0000:0000:0000:0002 '
|
||||
'scope link table 1\n'
|
||||
@ -2127,13 +2126,13 @@ class TestServerTestCase(base.TestCase):
|
||||
'post-up /sbin/ip -6 route add 2001:db9::1/128 via '
|
||||
'2001:db8::5 dev eth1 onlink table 1\n'
|
||||
'post-down /sbin/ip -6 route del 2001:db9::1/128 '
|
||||
'via 2001:db8::5 dev eth1 onlink table 1\n'
|
||||
'via 2001:db8::5 dev eth1 onlink table 1\n\n\n'
|
||||
'post-up /sbin/ip -6 rule add from '
|
||||
'2001:0db8:0000:0000:0000:0000:0000:0002/32 table 1 '
|
||||
'2001:0db8:0000:0000:0000:0000:0000:0002/128 table 1 '
|
||||
'priority 100\n'
|
||||
'post-down /sbin/ip -6 rule del from '
|
||||
'2001:0db8:0000:0000:0000:0000:0000:0002/32 table 1 '
|
||||
'priority 100\n'
|
||||
'2001:0db8:0000:0000:0000:0000:0000:0002/128 table 1 '
|
||||
'priority 100\n\n'
|
||||
'post-up /sbin/ip6tables -t nat -A POSTROUTING -p udp '
|
||||
'-o eth1 -j MASQUERADE\n'
|
||||
'post-down /sbin/ip6tables -t nat -D POSTROUTING -p udp '
|
||||
@ -2222,29 +2221,29 @@ class TestServerTestCase(base.TestCase):
|
||||
handle.write.assert_any_call(
|
||||
'\n# Generated by Octavia agent\n'
|
||||
'auto {netns_int} {netns_int}:0\n\n'
|
||||
'iface {netns_int} inet6 auto\n\n'
|
||||
'iface {netns_int} inet6 auto\n'
|
||||
'iface {netns_int}:0 inet6 static\n'
|
||||
'address 2001:0db8:0000:0000:0000:0000:0000:0002\n'
|
||||
'broadcast 2001:0db8:ffff:ffff:ffff:ffff:ffff:ffff\n'
|
||||
'netmask 32\n'
|
||||
'netmask 32\n\n'
|
||||
'# Add a source routing table to allow members to access '
|
||||
'the VIP\n'
|
||||
'post-up /sbin/ip -6 route add 2001:db8::/32 '
|
||||
'dev eth1 src 2001:0db8:0000:0000:0000:0000:0000:0002 '
|
||||
'scope link table 1\n'
|
||||
'the VIP\n\n'
|
||||
'post-up /sbin/ip -6 route add default via 2001:db8::1 '
|
||||
'dev eth1 onlink table 1\n'
|
||||
'post-down /sbin/ip -6 route del default via 2001:db8::1 '
|
||||
'dev eth1 onlink table 1\n'
|
||||
'post-down /sbin/ip -6 route del 2001:db8::/32 '
|
||||
'dev eth1 onlink table 1\n\n\n'
|
||||
'post-up /sbin/ip -6 route add 2001:db8::/32 '
|
||||
'dev eth1 src 2001:0db8:0000:0000:0000:0000:0000:0002 '
|
||||
'scope link table 1\n'
|
||||
'post-down /sbin/ip -6 route del 2001:db8::/32 '
|
||||
'dev eth1 src 2001:0db8:0000:0000:0000:0000:0000:0002 '
|
||||
'scope link table 1\n\n\n'
|
||||
'post-up /sbin/ip -6 rule add from '
|
||||
'2001:0db8:0000:0000:0000:0000:0000:0002/32 table 1 '
|
||||
'2001:0db8:0000:0000:0000:0000:0000:0002/128 table 1 '
|
||||
'priority 100\n'
|
||||
'post-down /sbin/ip -6 rule del from '
|
||||
'2001:0db8:0000:0000:0000:0000:0000:0002/32 table 1 '
|
||||
'priority 100\n'
|
||||
'2001:0db8:0000:0000:0000:0000:0000:0002/128 table 1 '
|
||||
'priority 100\n\n'
|
||||
'post-up /sbin/ip6tables -t nat -A POSTROUTING -p udp '
|
||||
'-o eth1 -j MASQUERADE\n'
|
||||
'post-down /sbin/ip6tables -t nat -D POSTROUTING -p udp '
|
||||
|
@ -17,9 +17,12 @@ import subprocess
|
||||
|
||||
import mock
|
||||
import netifaces
|
||||
from oslo_config import cfg
|
||||
from oslo_config import fixture as oslo_fixture
|
||||
|
||||
from octavia.amphorae.backends.agent.api_server import osutils
|
||||
from octavia.amphorae.backends.agent.api_server import plug
|
||||
from octavia.common import constants
|
||||
import octavia.tests.unit.base as base
|
||||
|
||||
FAKE_CIDR_IPV4 = '10.0.0.0/24'
|
||||
@ -111,6 +114,9 @@ class TestPlug(base.TestCase):
|
||||
def test_plug_vip_ipv6(self, mock_makedirs, mock_copytree,
|
||||
mock_check_output, mock_netns, mock_netns_create,
|
||||
mock_pyroute2, mock_webob, mock_nspopen):
|
||||
conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
|
||||
conf.config(group='controller_worker',
|
||||
loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY)
|
||||
m = mock.mock_open()
|
||||
with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
|
||||
self.test_plug.plug_vip(
|
||||
|
@ -80,7 +80,9 @@ class AgentJinjaTestCase(base.TestCase):
|
||||
'/etc/network/interfaces.d/\n'
|
||||
'agent_request_read_timeout = 120\n'
|
||||
'amphora_id = ' + AMP_ID + '\n'
|
||||
'amphora_udp_driver = keepalived_lvs')
|
||||
'amphora_udp_driver = keepalived_lvs\n\n'
|
||||
'[controller_worker]\n'
|
||||
'loadbalancer_topology = SINGLE')
|
||||
agent_cfg = ajc.build_agent_config(AMP_ID)
|
||||
self.assertEqual(expected_config, agent_cfg)
|
||||
|
||||
@ -115,7 +117,9 @@ class AgentJinjaTestCase(base.TestCase):
|
||||
'/etc/network/interfaces\n'
|
||||
'agent_request_read_timeout = 120\n'
|
||||
'amphora_id = ' + AMP_ID + '\n'
|
||||
'amphora_udp_driver = keepalived_lvs')
|
||||
'amphora_udp_driver = keepalived_lvs\n\n'
|
||||
'[controller_worker]\n'
|
||||
'loadbalancer_topology = SINGLE')
|
||||
agent_cfg = ajc.build_agent_config(AMP_ID)
|
||||
self.assertEqual(expected_config, agent_cfg)
|
||||
|
||||
@ -149,6 +153,8 @@ class AgentJinjaTestCase(base.TestCase):
|
||||
'/etc/network/interfaces.d/\n'
|
||||
'agent_request_read_timeout = 120\n'
|
||||
'amphora_id = ' + AMP_ID + '\n'
|
||||
'amphora_udp_driver = new_udp_driver')
|
||||
'amphora_udp_driver = new_udp_driver\n\n'
|
||||
'[controller_worker]\n'
|
||||
'loadbalancer_topology = SINGLE')
|
||||
agent_cfg = ajc.build_agent_config(AMP_ID)
|
||||
self.assertEqual(expected_config, agent_cfg)
|
||||
|
@ -13,6 +13,8 @@
|
||||
# under the License.
|
||||
#
|
||||
|
||||
import copy
|
||||
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
from oslo_config import fixture as oslo_fixture
|
||||
@ -73,6 +75,7 @@ class TestVRRPRestDriver(base.TestCase):
|
||||
" virtual_router_id 1\n"
|
||||
" priority 100\n"
|
||||
" nopreempt\n"
|
||||
" accept\n"
|
||||
" garp_master_refresh 5\n"
|
||||
" garp_master_refresh_repeat 2\n"
|
||||
" advert_int 10\n"
|
||||
@ -88,12 +91,76 @@ class TestVRRPRestDriver(base.TestCase):
|
||||
"\n"
|
||||
" virtual_ipaddress {\n"
|
||||
" 10.1.0.5\n"
|
||||
" }\n"
|
||||
" }\n\n"
|
||||
" virtual_routes {\n"
|
||||
" 10.1.0.0/24 dev eth1 src 10.1.0.5 scope link "
|
||||
"table 1\n"
|
||||
" }\n\n"
|
||||
" virtual_rules {\n"
|
||||
" from 10.1.0.5/32 table 1 priority 100\n"
|
||||
" }\n\n"
|
||||
" track_script {\n"
|
||||
" check_script\n"
|
||||
" }\n"
|
||||
"}\n")
|
||||
"}")
|
||||
|
||||
self.amphora1v6 = copy.deepcopy(self.amphora1)
|
||||
self.amphora1v6.vrrp_ip = '2001:db8::10'
|
||||
self.amphora2v6 = copy.deepcopy(self.amphora2)
|
||||
self.amphora2v6.vrrp_ip = '2001:db8::11'
|
||||
self.lbv6 = copy.deepcopy(self.lb)
|
||||
self.lbv6.amphorae = [self.amphora1v6, self.amphora2v6]
|
||||
self.lbv6.vip.ip_address = '2001:db8::15'
|
||||
|
||||
self.ref_v6_conf = ("vrrp_script check_script {\n"
|
||||
" script /tmp/test/vrrp/check_script.sh\n"
|
||||
" interval 5\n"
|
||||
" fall 2\n"
|
||||
" rise 2\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"vrrp_instance TESTGROUP {\n"
|
||||
" state MASTER\n"
|
||||
" interface eth1\n"
|
||||
" virtual_router_id 1\n"
|
||||
" priority 100\n"
|
||||
" nopreempt\n"
|
||||
" accept\n"
|
||||
" garp_master_refresh 5\n"
|
||||
" garp_master_refresh_repeat 2\n"
|
||||
" advert_int 10\n"
|
||||
" authentication {\n"
|
||||
" auth_type PASS\n"
|
||||
" auth_pass TESTPASSWORD\n"
|
||||
" }\n"
|
||||
"\n"
|
||||
" unicast_src_ip 2001:db8::10\n"
|
||||
" unicast_peer {\n"
|
||||
" 2001:db8::11\n"
|
||||
" }\n"
|
||||
"\n"
|
||||
" virtual_ipaddress {\n"
|
||||
" 2001:db8::15\n"
|
||||
" }\n\n"
|
||||
" virtual_routes {\n"
|
||||
" 2001:db8::/64 dev eth1 src "
|
||||
"2001:db8::15 scope link table 1\n"
|
||||
" }\n\n"
|
||||
" virtual_rules {\n"
|
||||
" from 2001:db8::15/128 table 1 "
|
||||
"priority 100\n"
|
||||
" }\n\n"
|
||||
" track_script {\n"
|
||||
" check_script\n"
|
||||
" }\n"
|
||||
"}")
|
||||
|
||||
def test_build_keepalived_config(self):
|
||||
config = self.templater.build_keepalived_config(self.lb, self.amphora1)
|
||||
config = self.templater.build_keepalived_config(
|
||||
self.lb, self.amphora1, '10.1.0.0/24')
|
||||
self.assertEqual(self.ref_conf, config)
|
||||
|
||||
def test_build_keepalived_ipv6_config(self):
|
||||
config = self.templater.build_keepalived_config(
|
||||
self.lbv6, self.amphora1v6, '2001:db8::/64')
|
||||
self.assertEqual(self.ref_v6_conf, config)
|
||||
|
@ -14,6 +14,7 @@
|
||||
#
|
||||
|
||||
import mock
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from octavia.amphorae.drivers.keepalived import vrrp_rest_driver
|
||||
from octavia.common import constants
|
||||
@ -29,8 +30,14 @@ class TestVRRPRestDriver(base.TestCase):
|
||||
self.FAKE_CONFIG = 'FAKE CONFIG'
|
||||
self.lb_mock = mock.MagicMock()
|
||||
self.amphora_mock = mock.MagicMock()
|
||||
self.amphora_mock.id = uuidutils.generate_uuid()
|
||||
self.amphora_mock.status = constants.AMPHORA_ALLOCATED
|
||||
self.lb_mock.amphorae = [self.amphora_mock]
|
||||
self.amphorae_network_config = {}
|
||||
vip_subnet = mock.MagicMock()
|
||||
vip_subnet.cidr = '192.0.2.0/24'
|
||||
self.amphorae_network_config[self.amphora_mock.id] = vip_subnet
|
||||
|
||||
super(TestVRRPRestDriver, self).setUp()
|
||||
|
||||
@mock.patch('octavia.amphorae.drivers.keepalived.jinja.'
|
||||
@ -39,7 +46,8 @@ class TestVRRPRestDriver(base.TestCase):
|
||||
|
||||
mock_templater.return_value = self.FAKE_CONFIG
|
||||
|
||||
self.keepalived_mixin.update_vrrp_conf(self.lb_mock)
|
||||
self.keepalived_mixin.update_vrrp_conf(self.lb_mock,
|
||||
self.amphorae_network_config)
|
||||
|
||||
self.client.upload_vrrp_config.assert_called_once_with(
|
||||
self.amphora_mock,
|
||||
|
@ -355,7 +355,7 @@ class TestAmphoraFlows(base.TestCase):
|
||||
self.assertIn(constants.LOADBALANCER, vrrp_subflow.requires)
|
||||
|
||||
self.assertEqual(1, len(vrrp_subflow.provides))
|
||||
self.assertEqual(1, len(vrrp_subflow.requires))
|
||||
self.assertEqual(2, len(vrrp_subflow.requires))
|
||||
|
||||
def test_get_post_map_lb_subflow(self, mock_get_net_driver):
|
||||
|
||||
|
@ -564,10 +564,12 @@ class TestAmphoraDriverTasks(base.TestCase):
|
||||
mock_listener_repo_get,
|
||||
mock_listener_repo_update,
|
||||
mock_amphora_repo_update):
|
||||
amphorae_network_config = mock.MagicMock()
|
||||
amphora_vrrp_update_obj = (
|
||||
amphora_driver_tasks.AmphoraVRRPUpdate())
|
||||
amphora_vrrp_update_obj.execute(_LB_mock)
|
||||
mock_driver.update_vrrp_conf.assert_called_once_with(_LB_mock)
|
||||
amphora_vrrp_update_obj.execute(_LB_mock, amphorae_network_config)
|
||||
mock_driver.update_vrrp_conf.assert_called_once_with(
|
||||
_LB_mock, amphorae_network_config)
|
||||
|
||||
def test_amphora_vrrp_stop(self,
|
||||
mock_driver,
|
||||
|
@ -0,0 +1,10 @@
|
||||
---
|
||||
upgrade:
|
||||
- |
|
||||
To resolve the IPv6 VIP issues on active/standby load balancers you
|
||||
need to build a new amphora image.
|
||||
fixes:
|
||||
- |
|
||||
Fixes issues using IPv6 VIP addresses with load balancers configured for
|
||||
active/standby topology. This fix requires a new amphora image to be
|
||||
built.
|
Loading…
Reference in New Issue
Block a user