Refactor the failover flows
This patch refactors the failover flows to improve the performance and
reliability of failovers in Octavia. Specific improvements are:

* More tasks and flows will retry when other OpenStack services are failing.
* Failover can now succeed even when all of the amphorae are missing for a
  given load balancer.
* It will check and repair the load balancer VIP should the VIP port(s)
  become corrupted in neutron.
* It will clean up extra resources that may be associated with a load
  balancer in the event of a cloud service failure.

This patch also removes some dead code.

Change-Id: I04cb2f1f10ec566298834f81df0cf8b100ca916c
Story: 2003084
Task: 23166
Story: 2004440
Task: 28108
This commit is contained in:
parent f26ab8b97b
commit 955bb88406
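Note: the retry options added below follow a capped, incrementing backoff. As
a hedged sketch only (the option names mirror the new sample-config options;
this is not the exact TaskFlow wait strategy Octavia uses internally), the
delay sequence implied by the defaults is:

# Hedged sketch: capped incrementing backoff implied by the new
# retry_interval / retry_backoff / retry_max sample options.
def backoff_delays(max_retries=15, retry_interval=1, retry_backoff=1,
                   retry_max=10):
    """Yield the wait (in seconds) before each retry attempt."""
    for attempt in range(max_retries):
        # Delay grows by retry_backoff each attempt, capped at retry_max.
        yield min(retry_interval + retry_backoff * attempt, retry_max)

print(list(backoff_delays()))
# [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 10, 10]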
etc/octavia.conf
@@ -165,6 +165,19 @@
 # Endpoint type to use for communication with the Barbican service.
 # endpoint_type = publicURL
 
+[compute]
+# The maximum attempts to retry an action with the compute service.
+# max_retries = 15
+
+# Seconds to wait before retrying an action with the compute service.
+# retry_interval = 1
+
+# The seconds to backoff retry attempts
+# retry_backoff = 1
+
+# The maximum interval in seconds between retry attempts
+# retry_max = 10
+
 [networking]
 # The maximum attempts to retry an action with the networking service.
 # max_retries = 15
@@ -172,6 +185,12 @@
 # Seconds to wait before retrying an action with the networking service.
 # retry_interval = 1
 
+# The seconds to backoff retry attempts
+# retry_backoff = 1
+
+# The maximum interval in seconds between retry attempts
+# retry_max = 10
+
 # The maximum time to wait, in seconds, for a port to detach from an amphora
 # port_detach_timeout = 300
 
@@ -236,11 +255,26 @@
 # active_connection_max_retries = 15
 # active_connection_rety_interval = 2
 
+# These "failover" timeouts are used during the failover process to probe
+# amphorae that are part of the load balancer being failed over.
+# These values are very low to facilitate "fail fast" should an amphora
+# not respond in a failure situation.
+# failover_connection_max_retries = 2
+# failover_connection_retry_interval = 5
+
 # The user flow log format for HAProxy.
 # {{ project_id }} and {{ lb_id }} will be automatically substituted by the
 # controller when configuring HAProxy if they are present in the string.
 # user_log_format = '{{ project_id }} {{ lb_id }} %f %ci %cp %t %{+Q}r %ST %B %U %[ssl_c_verify] %{+Q}[ssl_c_s_dn] %b %s %Tt %tsc'
 
+# API messaging / database commit retries
+# This is how many times the controller worker retries waiting for the API to
+# complete a database commit for a message received over the queue.
+# api_db_commit_retry_attempts = 15
+# api_db_commit_retry_initial_delay = 1
+# api_db_commit_retry_backoff = 1
+# api_db_commit_retry_max = 5
+
 [controller_worker]
 # workers = 1
 # amp_active_retries = 30
@@ -297,6 +331,9 @@
 # loadbalancer_topology = SINGLE
 # user_data_config_drive = False
 
+# amphora_delete_retries = 5
+# amphora_delete_retry_interval = 5
+
 [task_flow]
 # TaskFlow engine options are:
 # - serial: Runs all tasks on a single thread.
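Note: sample options like the ones above are normally declared with
oslo.config (in Octavia's case in octavia/common/config.py). A minimal,
hedged sketch of how the new [compute] retry options could be registered;
the option names come from the sample file above, but the registration code
itself is an assumption, not part of this diff:

from oslo_config import cfg

compute_opts = [
    cfg.IntOpt('max_retries', default=15,
               help='The maximum attempts to retry an action with the '
                    'compute service.'),
    cfg.IntOpt('retry_interval', default=1,
               help='Seconds to wait before retrying an action with the '
                    'compute service.'),
    cfg.IntOpt('retry_backoff', default=1,
               help='The seconds to backoff retry attempts.'),
    cfg.IntOpt('retry_max', default=10,
               help='The maximum interval in seconds between retry '
                    'attempts.'),
]

# Register the options under the [compute] group so they can be read as
# CONF.compute.max_retries, CONF.compute.retry_interval, etc.
cfg.CONF.register_opts(compute_opts, group='compute')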
octavia/amphorae/backends/agent/api_server/amphora_info.py
@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import ipaddress
 import os
 import re
 import socket
@@ -21,10 +20,11 @@ import subprocess
 import pyroute2
 import webob
 
-import netifaces
 from octavia.amphorae.backends.agent import api_server
 from octavia.amphorae.backends.agent.api_server import util
+from octavia.amphorae.backends.utils import network_utils
 from octavia.common import constants as consts
+from octavia.common import exceptions
 
 
 class AmphoraInfo(object):
@@ -175,65 +175,15 @@ class AmphoraInfo(object):
         return networks
 
     def get_interface(self, ip_addr):
 
         try:
-            ip_version = ipaddress.ip_address(ip_addr).version
-        except Exception:
-            return webob.Response(
-                json=dict(message="Invalid IP address"), status=400)
-
-        if ip_version == 4:
-            address_format = netifaces.AF_INET
-        elif ip_version == 6:
-            address_format = netifaces.AF_INET6
-        else:
-            return webob.Response(
-                json=dict(message="Bad IP address version"), status=400)
-
-        # We need to normalize the address as IPv6 has multiple representations
-        # fe80:0000:0000:0000:f816:3eff:fef2:2058 == fe80::f816:3eff:fef2:2058
-        normalized_addr = socket.inet_ntop(address_format,
-                                           socket.inet_pton(address_format,
-                                                            ip_addr))
-
-        with pyroute2.NetNS(consts.AMPHORA_NAMESPACE) as netns:
-            for addr in netns.get_addr():
-                # Save the interface index as IPv6 records don't list a
-                # textual interface
-                interface_idx = addr['index']
-                # Save the address family (IPv4/IPv6) for use normalizing
-                # the IP address for comparison
-                interface_af = addr['family']
-                # Search through the attributes of each address record
-                for attr in addr['attrs']:
-                    # Look for the attribute name/value pair for the address
-                    if attr[0] == 'IFA_ADDRESS':
-                        # Compare the normalized address with the address we
-                        # we are looking for. Since we have matched the name
-                        # above, attr[1] is the address value
-                        if normalized_addr == socket.inet_ntop(
-                                interface_af,
-                                socket.inet_pton(interface_af, attr[1])):
-
-                            # Lookup the matching interface name by
-                            # getting the interface with the index we found
-                            # in the above address search
-                            lookup_int = netns.get_links(interface_idx)
-                            # Search through the attributes of the matching
-                            # interface record
-                            for int_attr in lookup_int[0]['attrs']:
-                                # Look for the attribute name/value pair
-                                # that includes the interface name
-                                if int_attr[0] == 'IFLA_IFNAME':
-                                    # Return the response with the matching
-                                    # interface name that is in int_attr[1]
-                                    # for the matching interface attribute
-                                    # name
-                                    return webob.Response(
-                                        json=dict(message='OK',
-                                                  interface=int_attr[1]),
-                                        status=200)
-
-        return webob.Response(
-            json=dict(message="Error interface not found for IP address"),
-            status=404)
+            interface = network_utils.get_interface_name(
+                ip_addr, net_ns=consts.AMPHORA_NAMESPACE)
+        except exceptions.InvalidIPAddress:
+            return webob.Response(json=dict(message="Invalid IP address"),
+                                  status=400)
+        except exceptions.NotFound:
+            return webob.Response(
+                json=dict(message="Error interface not found for IP address"),
+                status=404)
+        return webob.Response(json=dict(message='OK', interface=interface),
+                              status=200)
octavia/amphorae/backends/agent/api_server/keepalived.py
@@ -47,6 +47,7 @@ class Keepalived(object):
 
         if not os.path.exists(util.keepalived_dir()):
             os.makedirs(util.keepalived_dir())
+        if not os.path.exists(util.keepalived_check_scripts_dir()):
             os.makedirs(util.keepalived_check_scripts_dir())
 
         conf_file = util.keepalived_cfg_path()
@@ -112,6 +113,9 @@ class Keepalived(object):
             )
             text_file.write(text)
 
+        # Configure the monitoring of haproxy
+        util.vrrp_check_script_update(None, consts.AMP_ACTION_START)
+
         # Make sure the new service is enabled on boot
         if init_system != consts.INIT_UPSTART:
             try:
octavia/amphorae/backends/agent/api_server/keepalivedlvs.py
@@ -78,7 +78,8 @@ class KeepalivedLvs(udp_listener_base.UdpListenerApiServerBase):
         # Active-Standby topology will create the directory below. So for
         # Single topology, it should not create the directory and the check
         # scripts for status change.
-        if not os.path.exists(util.keepalived_check_scripts_dir()):
+        if (CONF.controller_worker.loadbalancer_topology !=
+                consts.TOPOLOGY_ACTIVE_STANDBY):
             NEED_CHECK = False
 
         conf_file = util.keepalived_lvs_cfg_path(listener_id)
@@ -157,6 +158,9 @@ class KeepalivedLvs(udp_listener_base.UdpListenerApiServerBase):
         script_path = os.path.join(util.keepalived_check_scripts_dir(),
                                    KEEPALIVED_CHECK_SCRIPT_NAME)
         if not os.path.exists(script_path):
+            if not os.path.exists(util.keepalived_check_scripts_dir()):
+                os.makedirs(util.keepalived_check_scripts_dir())
+
             with os.fdopen(os.open(script_path, flags, stat.S_IEXEC),
                            'w') as script_file:
                 text = check_script_file_template.render(
octavia/amphorae/backends/agent/api_server/loadbalancer.py
@@ -235,12 +235,11 @@ class Loadbalancer(object):
             details="Unknown action: {0}".format(action)), status=400)
 
         self._check_lb_exists(lb_id)
+        is_vrrp = (CONF.controller_worker.loadbalancer_topology ==
+                   consts.TOPOLOGY_ACTIVE_STANDBY)
 
-        # Since this script should be created at LB create time
-        # we can check for this path to see if VRRP is enabled
-        # on this amphora and not write the file if VRRP is not in use
-        if os.path.exists(util.keepalived_check_script_path()):
-            self.vrrp_check_script_update(lb_id, action)
+        if is_vrrp:
+            util.vrrp_check_script_update(lb_id, action)
 
         # HAProxy does not start the process when given a reload
         # so start it if haproxy is not already running
@@ -262,6 +261,14 @@ class Loadbalancer(object):
             return webob.Response(json=dict(
                 message="Error {0}ing haproxy".format(action),
                 details=e.output), status=500)
+
+        # If we are not in active/standby we need to send an IP
+        # advertisement (GARP or NA). Keepalived handles this for
+        # active/standby load balancers.
+        if not is_vrrp and action in [consts.AMP_ACTION_START,
+                                      consts.AMP_ACTION_RELOAD]:
+            util.send_vip_advertisements(lb_id)
 
         if action in [consts.AMP_ACTION_STOP,
                       consts.AMP_ACTION_RELOAD]:
             return webob.Response(json=dict(
@@ -307,7 +314,7 @@ class Loadbalancer(object):
         # we can check for this path to see if VRRP is enabled
         # on this amphora and not write the file if VRRP is not in use
         if os.path.exists(util.keepalived_check_script_path()):
-            self.vrrp_check_script_update(
+            util.vrrp_check_script_update(
                 lb_id, action=consts.AMP_ACTION_STOP)
 
         # delete the ssl files
@@ -455,22 +462,6 @@ class Loadbalancer(object):
     def _cert_file_path(self, lb_id, filename):
         return os.path.join(self._cert_dir(lb_id), filename)
 
-    def vrrp_check_script_update(self, lb_id, action):
-        lb_ids = util.get_loadbalancers()
-        if action == consts.AMP_ACTION_STOP:
-            lb_ids.remove(lb_id)
-        args = []
-        for lbid in lb_ids:
-            args.append(util.haproxy_sock_path(lbid))
-
-        if not os.path.exists(util.keepalived_dir()):
-            os.makedirs(util.keepalived_dir())
-            os.makedirs(util.keepalived_check_scripts_dir())
-
-        cmd = 'haproxy-vrrp-check {args}; exit $?'.format(args=' '.join(args))
-        with open(util.haproxy_check_script_path(), 'w') as text_file:
-            text_file.write(cmd)
-
     def _check_haproxy_status(self, lb_id):
         if os.path.exists(util.pid_path(lb_id)):
             if os.path.exists(
octavia/amphorae/backends/agent/api_server/util.py
@@ -23,6 +23,8 @@ from oslo_config import cfg
 from oslo_log import log as logging
 
 from octavia.amphorae.backends.agent.api_server import osutils
+from octavia.amphorae.backends.utils import ip_advertisement
+from octavia.amphorae.backends.utils import network_utils
 from octavia.common import constants as consts
 
 CONF = cfg.CONF
@@ -188,7 +190,7 @@ def get_listeners():
 def get_loadbalancers():
     """Get Load balancers
 
-    :returns: An array with the ids of all load balancers,
+    :returns: An array with the uuids of all load balancers,
               e.g. ['123', '456', ...] or [] if no loadbalancers exist
     """
     if os.path.exists(CONF.haproxy_amphora.base_path):
@@ -332,3 +334,71 @@ def parse_haproxy_file(lb_id):
             stats_socket = m.group(1)
 
     return stats_socket, listeners
+
+
+def vrrp_check_script_update(lb_id, action):
+    os.makedirs(keepalived_dir(), exist_ok=True)
+    os.makedirs(keepalived_check_scripts_dir(), exist_ok=True)
+
+    lb_ids = get_loadbalancers()
+    udp_ids = get_udp_listeners()
+    # If no LBs are found, make sure keepalived thinks haproxy is down.
+    if not lb_ids:
+        if not udp_ids:
+            with open(haproxy_check_script_path(), 'w') as text_file:
+                text_file.write('exit 1')
+        return
+    if action == consts.AMP_ACTION_STOP:
+        lb_ids.remove(lb_id)
+    args = []
+    for lbid in lb_ids:
+        args.append(haproxy_sock_path(lbid))
+
+    cmd = 'haproxy-vrrp-check {args}; exit $?'.format(args=' '.join(args))
+    with open(haproxy_check_script_path(), 'w') as text_file:
+        text_file.write(cmd)
+
+
+def get_haproxy_vip_addresses(lb_id):
+    """Get the VIP addresses for a load balancer.
+
+    :param lb_id: The load balancer ID to get VIP addresses from.
+    :returns: List of VIP addresses (IPv4 and IPv6)
+    """
+    vips = []
+    with open(config_path(lb_id), 'r') as file:
+        for line in file:
+            current_line = line.strip()
+            if current_line.startswith('bind'):
+                for section in current_line.split(' '):
+                    # We will always have a port assigned per the template.
+                    if ':' in section:
+                        if ',' in section:
+                            addr_port = section.rstrip(',')
+                            vips.append(addr_port.rpartition(':')[0])
+                        else:
+                            vips.append(section.rpartition(':')[0])
+                        break
+    return vips
+
+
+def send_vip_advertisements(lb_id):
+    """Sends address advertisements for each load balancer VIP.
+
+    This method will send either GARP (IPv4) or neighbor advertisements (IPv6)
+    for the VIP addresses on a load balancer.
+
+    :param lb_id: The load balancer ID to send advertisements for.
+    :returns: None
+    """
+    try:
+        vips = get_haproxy_vip_addresses(lb_id)
+
+        for vip in vips:
+            interface = network_utils.get_interface_name(
+                vip, net_ns=consts.AMPHORA_NAMESPACE)
+            ip_advertisement.send_ip_advertisement(
+                interface, vip, net_ns=consts.AMPHORA_NAMESPACE)
+    except Exception as e:
+        LOG.debug('Send VIP advertisement failed due to: %s. '
                  'This amphora may not be the MASTER. Ignoring.', str(e))
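Note: to make the bind-line parsing in get_haproxy_vip_addresses() above
concrete, here is how the rpartition(':') logic behaves on typical haproxy
bind sections (the addresses are illustrative only):

# Illustration of the parsing in get_haproxy_vip_addresses() above.
# rpartition(':') splits on the LAST colon, so bracketless IPv6
# addresses keep their internal colons intact.
for section in ['203.0.113.10:443', 'fe80::f816:3eff:fef2:2058:80,']:
    addr_port = section.rstrip(',')      # drop a trailing list comma
    print(addr_port.rpartition(':')[0])  # strip the port
# 203.0.113.10
# fe80::f816:3eff:fef2:2058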
octavia/amphorae/backends/utils/ip_advertisement.py (new file, 183 lines)
@@ -0,0 +1,183 @@
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fcntl
import socket
from struct import pack
from struct import unpack

from oslo_log import log as logging

from octavia.amphorae.backends.utils import network_namespace
from octavia.common import constants
from octavia.common import utils as common_utils

LOG = logging.getLogger(__name__)


def garp(interface, ip_address, net_ns=None):
    """Sends a gratuitous ARP for ip_address on the interface.

    :param interface: The interface name to send the GARP on.
    :param ip_address: The IP address to advertise in the GARP.
    :param net_ns: The network namespace to send the GARP from.
    :returns: None
    """
    ARP_ETHERTYPE = 0x0806
    BROADCAST_MAC = b'\xff\xff\xff\xff\xff\xff'

    # Get a socket, optionally inside a network namespace
    garp_socket = None
    if net_ns:
        with network_namespace.NetworkNamespace(net_ns):
            garp_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
    else:
        garp_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)

    # Bind the socket with the ARP ethertype protocol
    garp_socket.bind((interface, ARP_ETHERTYPE))

    # Get the MAC address of the interface
    source_mac = garp_socket.getsockname()[4]

    garp_msg = [
        pack('!h', 1),                 # Hardware type ethernet
        pack('!h', 0x0800),            # Protocol type IPv4
        pack('!B', 6),                 # Hardware size
        pack('!B', 4),                 # Protocol size
        pack('!h', 1),                 # Opcode request
        source_mac,                    # Sender MAC address
        socket.inet_aton(ip_address),  # Sender IP address
        BROADCAST_MAC,                 # Target MAC address
        socket.inet_aton(ip_address)]  # Target IP address

    garp_ethernet = [
        BROADCAST_MAC,              # Ethernet destination
        source_mac,                 # Ethernet source
        pack('!h', ARP_ETHERTYPE),  # Ethernet type
        b''.join(garp_msg)]         # The GARP message

    garp_socket.send(b''.join(garp_ethernet))
    garp_socket.close()


def calculate_icmpv6_checksum(packet):
    """Calculate the ICMPv6 checksum for a packet.

    :param packet: The packet bytes to checksum.
    :returns: The checksum integer.
    """
    total = 0

    # Add up 16-bit words
    num_words = len(packet) // 2
    for chunk in unpack("!%sH" % num_words, packet[0:num_words * 2]):
        total += chunk

    # Add any left over byte
    if len(packet) % 2:
        total += packet[-1] << 8

    # Fold 32-bits into 16-bits
    total = (total >> 16) + (total & 0xffff)
    total += total >> 16
    return ~total + 0x10000 & 0xffff


def neighbor_advertisement(interface, ip_address, net_ns=None):
    """Sends an unsolicited neighbor advertisement for an ip on the interface.

    :param interface: The interface name to send the advertisement on.
    :param ip_address: The IP address to advertise.
    :param net_ns: The network namespace to send the advertisement from.
    :returns: None
    """
    ALL_NODES_ADDR = 'ff02::1'
    SIOCGIFHWADDR = 0x8927

    # Get a socket, optionally inside a network namespace
    na_socket = None
    if net_ns:
        with network_namespace.NetworkNamespace(net_ns):
            na_socket = socket.socket(
                socket.AF_INET6, socket.SOCK_RAW,
                socket.getprotobyname(constants.IPV6_ICMP))
    else:
        na_socket = socket.socket(socket.AF_INET6, socket.SOCK_RAW,
                                  socket.getprotobyname(constants.IPV6_ICMP))

    # Per RFC 4861 section 4.4, the hop limit should be 255
    na_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255)

    # Bind the socket with the source address
    na_socket.bind((ip_address, 0))

    # Get the byte representation of the MAC address of the interface
    # Note: You can't use getsockname() to get the MAC on this type of socket
    source_mac = fcntl.ioctl(na_socket.fileno(), SIOCGIFHWADDR, pack('256s',
                             bytes(interface, 'utf-8')))[18:24]

    # Get the byte representation of the source IP address
    source_ip_bytes = socket.inet_pton(socket.AF_INET6, ip_address)

    icmpv6_na_msg_prefix = [
        pack('!B', 136),  # ICMP Type Neighbor Advertisement
        pack('!B', 0)]    # ICMP Code
    icmpv6_na_msg_postfix = [
        pack('!I', 0xa0000000),  # Flags (Router, Override)
        source_ip_bytes,         # Target address
        pack('!B', 2),           # ICMPv6 option type target link-layer address
        pack('!B', 1),           # ICMPv6 option length
        source_mac]              # ICMPv6 option link-layer address

    # Calculate the ICMPv6 checksum
    icmpv6_pseudo_header = [
        source_ip_bytes,                                    # Source IP address
        socket.inet_pton(socket.AF_INET6, ALL_NODES_ADDR),  # Destination IP
        pack('!I', 58),            # IPv6 next header (ICMPv6)
        pack('!h', 32)]            # IPv6 payload length
    icmpv6_tmp_chksum = pack('!H', 0)  # Checksum is zero for the calculation
    tmp_chksum_msg = b''.join(icmpv6_pseudo_header + icmpv6_na_msg_prefix +
                              [icmpv6_tmp_chksum] + icmpv6_na_msg_postfix)
    checksum = pack('!H', calculate_icmpv6_checksum(tmp_chksum_msg))

    # Build the ICMPv6 unsolicited neighbor advertisement
    icmpv6_msg = b''.join(icmpv6_na_msg_prefix + [checksum] +
                          icmpv6_na_msg_postfix)

    na_socket.sendto(icmpv6_msg, (ALL_NODES_ADDR, 0, 0, 0))
    na_socket.close()


def send_ip_advertisement(interface, ip_address, net_ns=None):
    """Send an address advertisement.

    This method will send either GARP (IPv4) or neighbor advertisements (IPv6)
    for the ip address specified.

    :param interface: The interface name to send the advertisement on.
    :param ip_address: The IP address to advertise.
    :param net_ns: The network namespace to send the advertisement from.
    :returns: None
    """
    try:
        if common_utils.is_ipv4(ip_address):
            garp(interface, ip_address, net_ns)
        elif common_utils.is_ipv6(ip_address):
            neighbor_advertisement(interface, ip_address, net_ns)
        else:
            LOG.error('Unknown IP version for address: "%s". Skipping',
                      ip_address)
    except Exception as e:
        LOG.warning('Unable to send address advertisement for address: "%s", '
                    'error: %s. Skipping', ip_address, str(e))
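Note: a quick worked example for calculate_icmpv6_checksum() above. The
function sums the packet as 16-bit big-endian words, folds the carries back
in, and returns the one's complement:

# Worked example for calculate_icmpv6_checksum() above.
# Words 0x0001 and 0xf203 sum to 0xf204; the 16-bit one's complement
# of 0xf204 is 0x0dfb.
from octavia.amphorae.backends.utils.ip_advertisement import (
    calculate_icmpv6_checksum)

packet = b'\x00\x01\xf2\x03'
assert calculate_icmpv6_checksum(packet) == 0x0dfb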
octavia/amphorae/backends/utils/network_namespace.py (new file, 50 lines)
@@ -0,0 +1,50 @@
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ctypes
import os


class NetworkNamespace(object):
    """A network namespace context manager.

    Runs wrapped code inside the specified network namespace.

    :param netns: The network namespace name to enter.
    """
    # from linux/sched.h - We want to enter a network namespace
    CLONE_NEWNET = 0x40000000

    @staticmethod
    def _error_handler(result, func, arguments):
        if result == -1:
            errno = ctypes.get_errno()
            raise OSError(errno, os.strerror(errno))

    def __init__(self, netns):
        self.current_netns = '/proc/{pid}/ns/net'.format(pid=os.getpid())
        self.target_netns = '/var/run/netns/{netns}'.format(netns=netns)
        # reference: man setns(2)
        self.set_netns = ctypes.CDLL('libc.so.6', use_errno=True).setns
        self.set_netns.errcheck = self._error_handler

    def __enter__(self):
        # Save the current network namespace
        self.current_netns_fd = open(self.current_netns)
        with open(self.target_netns) as fd:
            self.set_netns(fd.fileno(), self.CLONE_NEWNET)

    def __exit__(self, *args):
        # Return to the previous network namespace
        self.set_netns(self.current_netns_fd.fileno(), self.CLONE_NEWNET)
        self.current_netns_fd.close()
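Note: usage follows the pattern visible in ip_advertisement.garp() above. A
minimal sketch; the namespace name below is an example (Octavia refers to the
amphora's namespace via consts.AMPHORA_NAMESPACE):

# Minimal usage sketch for NetworkNamespace. Sockets created inside
# the context belong to the target namespace and remain usable after
# the context exits back to the original namespace.
import socket

from octavia.amphorae.backends.utils import network_namespace

with network_namespace.NetworkNamespace('amphora-haproxy'):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.close()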
octavia/amphorae/backends/utils/network_utils.py (new file, 83 lines)
@@ -0,0 +1,83 @@
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ipaddress

import pyroute2

from octavia.common import exceptions


def _find_interface(ip_address, rtnl_api, normalized_addr):
    """Find the interface using a routing netlink API.

    :param ip_address: The IP address to search with.
    :param rtnl_api: A pyroute2 rtnl_api instance. (IPRoute, NetNS, etc.)
    :param normalized_addr: The compressed form of ip_address.
    :returns: The interface name if found, None if not found.
    :raises exceptions.InvalidIPAddress: Invalid IP address provided.
    """
    for addr in rtnl_api.get_addr(address=ip_address):
        # Save the interface index as IPv6 records don't list a textual
        # interface
        interface_idx = addr['index']
        # Search through the attributes of each address record
        for attr in addr['attrs']:
            # Look for the attribute name/value pair for the address
            if attr[0] == 'IFA_ADDRESS':
                # Compare the normalized address with the address we are
                # looking for. Since we have matched the name above, attr[1]
                # is the address value
                if normalized_addr == ipaddress.ip_address(attr[1]).compressed:
                    # Lookup the matching interface name by getting the
                    # interface with the index we found in the above address
                    # search
                    lookup_int = rtnl_api.get_links(interface_idx)
                    # Search through the attributes of the matching interface
                    # record
                    for int_attr in lookup_int[0]['attrs']:
                        # Look for the attribute name/value pair that includes
                        # the interface name
                        if int_attr[0] == 'IFLA_IFNAME':
                            # Return the matching interface name that is in
                            # int_attr[1] for the matching interface attribute
                            # name
                            return int_attr[1]
    # We didn't find an interface with that IP address.
    return None


def get_interface_name(ip_address, net_ns=None):
    """Gets the interface name from an IP address.

    :param ip_address: The IP address to lookup.
    :param net_ns: The network namespace to find the interface in.
    :returns: The interface name.
    :raises exceptions.InvalidIPAddress: Invalid IP address provided.
    :raises octavia.common.exceptions.NotFound: No interface was found.
    """
    # We need to normalize the address as IPv6 has multiple representations
    # fe80:0000:0000:0000:f816:3eff:fef2:2058 == fe80::f816:3eff:fef2:2058
    try:
        normalized_addr = ipaddress.ip_address(ip_address).compressed
    except ValueError:
        raise exceptions.InvalidIPAddress(ip_addr=ip_address)

    if net_ns:
        with pyroute2.NetNS(net_ns) as rtnl_api:
            interface = _find_interface(ip_address, rtnl_api, normalized_addr)
    else:
        with pyroute2.IPRoute() as rtnl_api:
            interface = _find_interface(ip_address, rtnl_api, normalized_addr)
    if interface is not None:
        return interface
    raise exceptions.NotFound(resource='IP address', id=ip_address)
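Note: the amphora agent's get_interface() shown earlier consumes this helper;
a sketch of exercising it directly, mirroring that call site:

# Usage sketch for get_interface_name(), mirroring how
# AmphoraInfo.get_interface() calls it above.
from octavia.amphorae.backends.utils import network_utils
from octavia.common import constants as consts
from octavia.common import exceptions

try:
    name = network_utils.get_interface_name(
        'fe80::f816:3eff:fef2:2058', net_ns=consts.AMPHORA_NAMESPACE)
    print(name)  # e.g. 'eth1'
except exceptions.InvalidIPAddress:
    print('not a valid IP address')
except exceptions.NotFound:
    print('no interface carries that address')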
octavia/amphorae/drivers/driver_base.py
@@ -202,6 +202,21 @@ class AmphoraLoadBalancerDriver(object, metaclass=abc.ABCMeta):
         :type agent_config: string
         """
+
+    @abc.abstractmethod
+    def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None):
+        """Get the interface name from an IP address.
+
+        :param amphora: The amphora to query.
+        :type amphora: octavia.db.models.Amphora
+        :param ip_address: The IP address to lookup. (IPv4 or IPv6)
+        :type ip_address: string
+        :param timeout_dict: Dictionary of timeout values for calls to the
+                             amphora. May contain: req_conn_timeout,
+                             req_read_timeout, conn_max_retries,
+                             conn_retry_interval
+        :type timeout_dict: dict
+        """
 
 
 class HealthMixin(object, metaclass=abc.ABCMeta):
     @abc.abstractmethod
@@ -252,10 +267,17 @@ class VRRPDriverMixin(object, metaclass=abc.ABCMeta):
         class XYZ: ...
     """
     @abc.abstractmethod
-    def update_vrrp_conf(self, loadbalancer):
+    def update_vrrp_conf(self, loadbalancer, amphorae_network_config, amphora,
+                         timeout_dict=None):
         """Update amphorae of the loadbalancer with a new VRRP configuration
 
         :param loadbalancer: loadbalancer object
+        :param amphorae_network_config: amphorae network configurations
+        :param amphora: The amphora object to update.
+        :param timeout_dict: Dictionary of timeout values for calls to the
+                             amphora. May contain: req_conn_timeout,
+                             req_read_timeout, conn_max_retries,
+                             conn_retry_interval
         """
 
     @abc.abstractmethod
@@ -266,10 +288,14 @@ class VRRPDriverMixin(object, metaclass=abc.ABCMeta):
         """
 
     @abc.abstractmethod
-    def start_vrrp_service(self, loadbalancer):
-        """Start the VRRP services of all amphorae of the loadbalancer
+    def start_vrrp_service(self, amphora, timeout_dict=None):
+        """Start the VRRP services on the amphora
 
-        :param loadbalancer: loadbalancer object
+        :param amphora: The amphora object to start the service on.
+        :param timeout_dict: Dictionary of timeout values for calls to the
+                             amphora. May contain: req_conn_timeout,
+                             req_read_timeout, conn_max_retries,
+                             conn_retry_interval
         """
 
     @abc.abstractmethod
@@ -278,10 +304,3 @@ class VRRPDriverMixin(object, metaclass=abc.ABCMeta):
 
         :param loadbalancer: loadbalancer object
         """
-
-    @abc.abstractmethod
-    def get_vrrp_interface(self, amphora):
-        """Get the VRRP interface object for a specific amphora
-
-        :param amphora: amphora object
-        """
octavia/amphorae/drivers/haproxy/exceptions.py
@@ -20,7 +20,7 @@ from oslo_log import log as logging
 LOG = logging.getLogger(__name__)
 
 
-def check_exception(response, ignore=tuple()):
+def check_exception(response, ignore=tuple(), log_error=True):
     status_code = response.status_code
     responses = {
         400: InvalidRequest,
@@ -34,8 +34,9 @@ def check_exception(response, ignore=tuple()):
     }
     if (status_code not in ignore) and (status_code in responses):
         try:
-            LOG.error('Amphora agent returned unexpected result code %s with '
-                      'response %s', status_code, response.json())
+            if log_error:
+                LOG.error('Amphora agent returned unexpected result code %s '
+                          'with response %s', status_code, response.json())
         except Exception:
             # Handle the odd case where there is no response body
             # like when using requests_mock which doesn't support has_body
octavia/amphorae/drivers/haproxy/rest_api_driver.py
@@ -90,7 +90,7 @@ class HaproxyAmphoraLoadBalancerDriver(
 
         return haproxy_version_string.split('.')[:2]
 
-    def _populate_amphora_api_version(self, amphora,
+    def _populate_amphora_api_version(self, amphora, timeout_dict=None,
                                       raise_retry_exception=False):
         """Populate the amphora object with the api_version
 
@@ -102,7 +102,7 @@ class HaproxyAmphoraLoadBalancerDriver(
         if not getattr(amphora, 'api_version', None):
             try:
                 amphora.api_version = self.clients['base'].get_api_version(
-                    amphora,
+                    amphora, timeout_dict=timeout_dict,
                     raise_retry_exception=raise_retry_exception)['api_version']
             except exc.NotFound:
                 # Amphora is too old for version discovery, default to 0.5
@@ -291,8 +291,11 @@ class HaproxyAmphoraLoadBalancerDriver(
             getattr(self.clients[amp.api_version], func_name)(
                 amp, loadbalancer.id, *args)
 
-    def start(self, loadbalancer, amphora=None):
-        self._apply('start_listener', loadbalancer, amphora)
+    def reload(self, loadbalancer, amphora=None, timeout_dict=None):
+        self._apply('reload_listener', loadbalancer, amphora, timeout_dict)
+
+    def start(self, loadbalancer, amphora=None, timeout_dict=None):
+        self._apply('start_listener', loadbalancer, amphora, timeout_dict)
 
     def delete(self, listener):
         # Delete any UDP listeners the old way (we didn't update the way they
@@ -588,6 +591,28 @@ class HaproxyAmphoraLoadBalancerDriver(
                    'API.'.format(amphora.id))
             raise driver_except.AmpDriverNotImplementedError()
 
+    def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None):
+        """Get the interface name for an IP address.
+
+        :param amphora: The amphora to query.
+        :type amphora: octavia.db.models.Amphora
+        :param ip_address: The IP address to lookup. (IPv4 or IPv6)
+        :type ip_address: string
+        :param timeout_dict: Dictionary of timeout values for calls to the
+                             amphora. May contain: req_conn_timeout,
+                             req_read_timeout, conn_max_retries,
+                             conn_retry_interval
+        :type timeout_dict: dict
+        :returns: None if not found, the interface name string if found.
+        """
+        try:
+            self._populate_amphora_api_version(amphora, timeout_dict)
+            response_json = self.clients[amphora.api_version].get_interface(
+                amphora, ip_address, timeout_dict, log_error=False)
+            return response_json.get('interface', None)
+        except (exc.NotFound, driver_except.TimeOutException):
+            return None
+
 
 # Check a custom hostname
 class CustomHostNameCheckingAdapter(requests.adapters.HTTPAdapter):
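Note: the timeout_dict plumbed through these driver calls is what lets
failover "fail fast". A hedged sketch of building one from the new
[haproxy_amphora] failover options and using it with the new
get_interface_from_ip(); the literal key names come from the docstrings
above (Octavia itself references them via constants), and amphora_driver,
amphora, and vip_address are hypothetical placeholders:

# Hedged sketch: a fail-fast timeout_dict for failover probes.
timeout_dict = {
    'req_conn_timeout': 2,
    'req_read_timeout': 2,
    'conn_max_retries': 2,     # failover_connection_max_retries
    'conn_retry_interval': 5,  # failover_connection_retry_interval
}

# Probe which interface (if any) carries the VIP on a specific amphora;
# this returns None instead of raising when the amphora is unreachable.
interface = amphora_driver.get_interface_from_ip(
    amphora, vip_address, timeout_dict=timeout_dict)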
@@ -713,9 +738,10 @@ class AmphoraAPIClientBase(object):
                            'exception': exception})
         raise driver_except.TimeOutException()
 
-    def get_api_version(self, amp, raise_retry_exception=False):
+    def get_api_version(self, amp, timeout_dict=None,
+                        raise_retry_exception=False):
         amp.api_version = None
-        r = self.get(amp, retry_404=False,
+        r = self.get(amp, retry_404=False, timeout_dict=timeout_dict,
                      raise_retry_exception=raise_retry_exception)
         # Handle 404 special as we don't want to log an ERROR on 404
         exc.check_exception(r, (404,))
@@ -816,16 +842,15 @@ class AmphoraAPIClient0_5(AmphoraAPIClientBase):
         r = self.put(amp, 'vrrp/upload', data=config)
         return exc.check_exception(r)
 
-    def _vrrp_action(self, action, amp):
-        r = self.put(amp, 'vrrp/{action}'.format(action=action))
+    def _vrrp_action(self, action, amp, timeout_dict=None):
+        r = self.put(amp, 'vrrp/{action}'.format(action=action),
+                     timeout_dict=timeout_dict)
         return exc.check_exception(r)
 
-    def get_interface(self, amp, ip_addr, timeout_dict=None):
+    def get_interface(self, amp, ip_addr, timeout_dict=None, log_error=True):
         r = self.get(amp, 'interface/{ip_addr}'.format(ip_addr=ip_addr),
                      timeout_dict=timeout_dict)
-        if exc.check_exception(r):
-            return r.json()
-        return None
+        return exc.check_exception(r, log_error=log_error).json()
 
     def upload_udp_config(self, amp, listener_id, config, timeout_dict=None):
         r = self.put(
@@ -946,16 +971,15 @@ class AmphoraAPIClient1_0(AmphoraAPIClientBase):
         r = self.put(amp, 'vrrp/upload', data=config)
         return exc.check_exception(r)
 
-    def _vrrp_action(self, action, amp):
-        r = self.put(amp, 'vrrp/{action}'.format(action=action))
+    def _vrrp_action(self, action, amp, timeout_dict=None):
+        r = self.put(amp, 'vrrp/{action}'.format(action=action),
+                     timeout_dict=timeout_dict)
        return exc.check_exception(r)
 
-    def get_interface(self, amp, ip_addr, timeout_dict=None):
+    def get_interface(self, amp, ip_addr, timeout_dict=None, log_error=True):
         r = self.get(amp, 'interface/{ip_addr}'.format(ip_addr=ip_addr),
                      timeout_dict=timeout_dict)
-        if exc.check_exception(r):
-            return r.json()
-        return None
+        return exc.check_exception(r, log_error=log_error).json()
 
     def upload_udp_config(self, amp, listener_id, config, timeout_dict=None):
         r = self.put(
octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py
@@ -29,34 +29,40 @@ class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin):
         # The Mixed class must define a self.client object for the
         # AmphoraApiClient
 
-    def update_vrrp_conf(self, loadbalancer, amphorae_network_config):
-        """Update amphorae of the loadbalancer with a new VRRP configuration
+    def update_vrrp_conf(self, loadbalancer, amphorae_network_config, amphora,
+                         timeout_dict=None):
+        """Update amphora of the loadbalancer with a new VRRP configuration
 
         :param loadbalancer: loadbalancer object
         :param amphorae_network_config: amphorae network configurations
+        :param amphora: The amphora object to update.
+        :param timeout_dict: Dictionary of timeout values for calls to the
+                             amphora. May contain: req_conn_timeout,
+                             req_read_timeout, conn_max_retries,
+                             conn_retry_interval
         """
+        if amphora.status != constants.AMPHORA_ALLOCATED:
+            LOG.debug('update_vrrp_conf called for un-allocated amphora %s. '
+                      'Ignoring.', amphora.id)
+            return
+
         templater = jinja_cfg.KeepalivedJinjaTemplater()
 
-        LOG.debug("Update loadbalancer %s amphora VRRP configuration.",
-                  loadbalancer.id)
-
-        for amp in filter(
-                lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
-                loadbalancer.amphorae):
-
-            self._populate_amphora_api_version(amp)
-            # Get the VIP subnet prefix for the amphora
-            # For amphorav2 amphorae_network_config will be list of dicts
-            try:
-                vip_cidr = amphorae_network_config[amp.id].vip_subnet.cidr
-            except AttributeError:
-                vip_cidr = amphorae_network_config[amp.id][
-                    constants.VIP_SUBNET][constants.CIDR]
-
-            # Generate Keepalived configuration from loadbalancer object
-            config = templater.build_keepalived_config(
-                loadbalancer, amp, vip_cidr)
-            self.clients[amp.api_version].upload_vrrp_config(amp, config)
+        LOG.debug("Update amphora %s VRRP configuration.", amphora.id)
+
+        self._populate_amphora_api_version(amphora)
+        # Get the VIP subnet prefix for the amphora
+        # For amphorav2 amphorae_network_config will be list of dicts
+        try:
+            vip_cidr = amphorae_network_config[amphora.id].vip_subnet.cidr
+        except AttributeError:
+            vip_cidr = amphorae_network_config[amphora.id][
+                constants.VIP_SUBNET][constants.CIDR]
+
+        # Generate Keepalived configuration from loadbalancer object
+        config = templater.build_keepalived_config(
+            loadbalancer, amphora, vip_cidr)
+        self.clients[amphora.api_version].upload_vrrp_config(amphora, config)
 
     def stop_vrrp_service(self, loadbalancer):
         """Stop the vrrp services running on the loadbalancer's amphorae
@@ -73,21 +79,25 @@ class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin):
             self._populate_amphora_api_version(amp)
             self.clients[amp.api_version].stop_vrrp(amp)
 
-    def start_vrrp_service(self, loadbalancer):
-        """Start the VRRP services of all amphorae of the loadbalancer
+    def start_vrrp_service(self, amphora, timeout_dict=None):
+        """Start the VRRP services on an amphora.
 
-        :param loadbalancer: loadbalancer object
+        :param amphora: amphora object
+        :param timeout_dict: Dictionary of timeout values for calls to the
+                             amphora. May contain: req_conn_timeout,
+                             req_read_timeout, conn_max_retries,
+                             conn_retry_interval
         """
-        LOG.info("Start loadbalancer %s amphora VRRP Service.",
-                 loadbalancer.id)
-
-        for amp in filter(
-                lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
-                loadbalancer.amphorae):
-
-            LOG.debug("Start VRRP Service on amphora %s .", amp.lb_network_ip)
-            self._populate_amphora_api_version(amp)
-            self.clients[amp.api_version].start_vrrp(amp)
+        if amphora.status != constants.AMPHORA_ALLOCATED:
+            LOG.debug('start_vrrp_service called for un-allocated amphora '
+                      '%s. Ignoring.', amphora.id)
+            return
+
+        LOG.info("Start amphora %s VRRP Service.", amphora.id)
+
+        self._populate_amphora_api_version(amphora)
+        self.clients[amphora.api_version].start_vrrp(
+            amphora, timeout_dict=timeout_dict)
 
     def reload_vrrp_service(self, loadbalancer):
         """Reload the VRRP services of all amphorae of the loadbalancer
@@ -103,8 +113,3 @@ class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin):
 
             self._populate_amphora_api_version(amp)
             self.clients[amp.api_version].reload_vrrp(amp)
-
-    def get_vrrp_interface(self, amphora, timeout_dict=None):
-        self._populate_amphora_api_version(amphora)
-        return self.clients[amphora.api_version].get_interface(
-            amphora, amphora.vrrp_ip, timeout_dict=timeout_dict)['interface']
octavia/amphorae/drivers/noop_driver/driver.py
@@ -114,6 +114,13 @@ class NoopManager(object):
         self.amphoraconfig[amphora.id, agent_config] = (
             amphora.id, agent_config, 'update_amphora_agent_config')
 
+    def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None):
+        LOG.debug("Amphora %s no-op, get interface from amphora %s for IP %s",
+                  self.__class__.__name__, amphora.id, ip_address)
+        if ip_address == '198.51.100.99':
+            return "noop0"
+        return None
+
 
 class NoopAmphoraLoadBalancerDriver(
     driver_base.AmphoraLoadBalancerDriver,
@@ -170,17 +177,19 @@ class NoopAmphoraLoadBalancerDriver(
     def update_amphora_agent_config(self, amphora, agent_config):
         self.driver.update_amphora_agent_config(amphora, agent_config)
 
-    def update_vrrp_conf(self, loadbalancer):
+    def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None):
+        return self.driver.get_interface_from_ip(amphora, ip_address,
+                                                 timeout_dict)
+
+    def update_vrrp_conf(self, loadbalancer, amphorae_network_config, amphora,
+                         timeout_dict=None):
         pass
 
     def stop_vrrp_service(self, loadbalancer):
         pass
 
-    def start_vrrp_service(self, loadbalancer):
+    def start_vrrp_service(self, amphora, timeout_dict=None):
         pass
 
     def reload_vrrp_service(self, loadbalancer):
         pass
-
-    def get_vrrp_interface(self, amphora):
-        pass
@@ -72,8 +72,11 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
         try:
             vip = network_driver.allocate_vip(lb_obj)
         except network_base.AllocateVIPException as e:
-            raise exceptions.DriverError(user_fault_string=e.orig_msg,
-                                         operator_fault_string=e.orig_msg)
+            message = str(e)
+            if getattr(e, 'orig_msg', None) is not None:
+                message = e.orig_msg
+            raise exceptions.DriverError(user_fault_string=message,
+                                         operator_fault_string=message)
 
         LOG.info('Amphora provider created VIP port %s for load balancer %s.',
                  vip.port_id, loadbalancer_id)
octavia/api/drivers/utils.py
@@ -22,6 +22,7 @@ from oslo_log import log as logging
 from oslo_utils import excutils
 from stevedore import driver as stevedore_driver
 
+from octavia.common import constants
 from octavia.common import data_models
 from octavia.common import exceptions
 from octavia.common.tls_utils import cert_parser
@@ -544,6 +545,9 @@ def vip_dict_to_provider_dict(vip_dict):
         new_vip_dict['vip_subnet_id'] = vip_dict['subnet_id']
     if 'qos_policy_id' in vip_dict:
         new_vip_dict['vip_qos_policy_id'] = vip_dict['qos_policy_id']
+    if constants.OCTAVIA_OWNED in vip_dict:
+        new_vip_dict[constants.OCTAVIA_OWNED] = vip_dict[
+            constants.OCTAVIA_OWNED]
     return new_vip_dict
 
 
@@ -559,4 +563,6 @@ def provider_vip_dict_to_vip_obj(vip_dictionary):
     vip_obj.subnet_id = vip_dictionary['vip_subnet_id']
     if 'vip_qos_policy_id' in vip_dictionary:
         vip_obj.qos_policy_id = vip_dictionary['vip_qos_policy_id']
+    if constants.OCTAVIA_OWNED in vip_dictionary:
+        vip_obj.octavia_owned = vip_dictionary[constants.OCTAVIA_OWNED]
     return vip_obj
@@ -12,6 +12,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+import ipaddress
 
 from octavia_lib.api.drivers import data_models as driver_dm
 from oslo_config import cfg
@@ -187,26 +188,40 @@ class LoadBalancersController(base.BaseController):
                 isinstance(load_balancer.vip_qos_policy_id, wtypes.UnsetType)):
             load_balancer.vip_qos_policy_id = port_qos_policy_id
 
-        # Identify the subnet for this port
         if load_balancer.vip_subnet_id:
+            # If we were provided a subnet_id, validate it exists and that
+            # there is a fixed_ip on the port that matches the provided subnet
             validate.subnet_exists(subnet_id=load_balancer.vip_subnet_id,
                                    context=context)
-        else:
-            if load_balancer.vip_address:
-                for port_fixed_ip in port.fixed_ips:
-                    if port_fixed_ip.ip_address == load_balancer.vip_address:
-                        load_balancer.vip_subnet_id = port_fixed_ip.subnet_id
-                        break
-                if not load_balancer.vip_subnet_id:
-                    raise exceptions.ValidationException(detail=_(
-                        "Specified VIP address not found on the "
-                        "specified VIP port."))
-            elif len(port.fixed_ips) == 1:
-                load_balancer.vip_subnet_id = port.fixed_ips[0].subnet_id
-            else:
-                raise exceptions.ValidationException(detail=_(
-                    "VIP port's subnet could not be determined. Please "
-                    "specify either a VIP subnet or address."))
+            for port_fixed_ip in port.fixed_ips:
+                if port_fixed_ip.subnet_id == load_balancer.vip_subnet_id:
+                    load_balancer.vip_address = port_fixed_ip.ip_address
+                    break  # Just pick the first address found in the subnet
+            if not load_balancer.vip_address:
+                raise exceptions.ValidationException(detail=_(
+                    "No VIP address found on the specified VIP port within "
+                    "the specified subnet."))
+        elif load_balancer.vip_address:
+            normalized_lb_ip = ipaddress.ip_address(
+                load_balancer.vip_address).compressed
+            for port_fixed_ip in port.fixed_ips:
+                normalized_port_ip = ipaddress.ip_address(
+                    port_fixed_ip.ip_address).compressed
+                if normalized_port_ip == normalized_lb_ip:
+                    load_balancer.vip_subnet_id = port_fixed_ip.subnet_id
+                    break
+            if not load_balancer.vip_subnet_id:
+                raise exceptions.ValidationException(detail=_(
+                    "Specified VIP address not found on the "
+                    "specified VIP port."))
+        elif len(port.fixed_ips) == 1:
+            # User provided only a port, get the subnet and address from it
+            load_balancer.vip_subnet_id = port.fixed_ips[0].subnet_id
+            load_balancer.vip_address = port.fixed_ips[0].ip_address
+        else:
+            raise exceptions.ValidationException(detail=_(
+                "VIP port's subnet could not be determined. Please "
+                "specify either a VIP subnet or address."))
 
     def _validate_vip_request_object(self, load_balancer, context=None):
         allowed_network_objects = []
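
Illustrative sketch, not patch code: the new elif branch compares a user-supplied VIP address against the port's fixed IPs only after normalizing both sides with the stdlib ipaddress module. This matters mostly for IPv6, where one address has many textual spellings:

    import ipaddress

    # Two spellings of the same IPv6 address; naive string comparison fails.
    lb_vip = '2001:0db8:0000:0000:0000:0000:0000:0001'
    port_fixed_ip = '2001:db8::1'

    assert lb_vip != port_fixed_ip  # the raw strings differ
    assert (ipaddress.ip_address(lb_vip).compressed
            == ipaddress.ip_address(port_fixed_ip).compressed)  # '2001:db8::1'
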
@@ -450,7 +465,10 @@ class LoadBalancersController(base.BaseController):
         # Do the same with the availability_zone dict
         lb_dict['availability_zone'] = az_dict
 
-        # See if the provider driver wants to create the VIP port
+        # See if the provider driver wants to manage the VIP port
+        # This will still be called if the user provided a port to
+        # allow drivers to collect any required information about the
+        # VIP port.
         octavia_owned = False
         try:
             provider_vip_dict = driver_utils.vip_dict_to_provider_dict(
@@ -470,6 +488,10 @@ class LoadBalancersController(base.BaseController):
             if 'port_id' not in vip_dict or not vip_dict['port_id']:
                 octavia_owned = True
 
+            # Check if the driver claims octavia owns the VIP port.
+            if vip.octavia_owned:
+                octavia_owned = True
+
             self.repositories.vip.update(
                 lock_session, db_lb.id, ip_address=vip.ip_address,
                 port_id=vip.port_id, network_id=vip.network_id,
@@ -198,6 +198,20 @@ amphora_agent_opts = [
                help='The UDP API backend for amphora agent.'),
 ]
 
+compute_opts = [
+    cfg.IntOpt('max_retries', default=15,
+               help=_('The maximum attempts to retry an action with the '
+                      'compute service.')),
+    cfg.IntOpt('retry_interval', default=1,
+               help=_('Seconds to wait before retrying an action with the '
+                      'compute service.')),
+    cfg.IntOpt('retry_backoff', default=1,
+               help=_('The seconds to backoff retry attempts.')),
+    cfg.IntOpt('retry_max', default=10,
+               help=_('The maximum interval in seconds between retry '
+                      'attempts.')),
+]
+
 networking_opts = [
     cfg.IntOpt('max_retries', default=15,
                help=_('The maximum attempts to retry an action with the '
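
Illustrative sketch, not patch code (the call_compute() callable and the exact wait formula are assumptions for illustration): the four new [compute] options describe an incrementing backoff that is capped at retry_max seconds, mirroring the existing [networking] options:

    import time

    def retry_compute_action(call_compute, max_retries=15, retry_interval=1,
                             retry_backoff=1, retry_max=10):
        """Retry a compute call, waiting a little longer each attempt."""
        for attempt in range(max_retries + 1):
            try:
                return call_compute()
            except Exception:
                if attempt == max_retries:
                    raise
                # Incrementing wait, capped at retry_max seconds.
                time.sleep(min(retry_interval + attempt * retry_backoff,
                               retry_max))
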
@@ -205,6 +219,11 @@ networking_opts = [
     cfg.IntOpt('retry_interval', default=1,
                help=_('Seconds to wait before retrying an action with the '
                       'networking service.')),
+    cfg.IntOpt('retry_backoff', default=1,
+               help=_('The seconds to backoff retry attempts.')),
+    cfg.IntOpt('retry_max', default=10,
+               help=_('The maximum interval in seconds between retry '
+                      'attempts.')),
     cfg.IntOpt('port_detach_timeout', default=300,
                help=_('Seconds to wait for a port to detach from an '
                       'amphora.')),
@@ -317,6 +336,14 @@ haproxy_amphora_opts = [
                default=2,
                help=_('Retry timeout between connection attempts in '
                       'seconds for active amphora.')),
+    cfg.IntOpt('failover_connection_max_retries',
+               default=2,
+               help=_('Retry threshold for connecting to an amphora in '
+                      'failover.')),
+    cfg.IntOpt('failover_connection_retry_interval',
+               default=5,
+               help=_('Retry timeout between connection attempts in '
+                      'seconds for amphora in failover.')),
     cfg.IntOpt('build_rate_limit',
                default=-1,
                help=_('Number of amphorae that could be built per controller '
|
|||||||
deprecated_reason='This is now automatically discovered '
|
deprecated_reason='This is now automatically discovered '
|
||||||
' and configured.',
|
' and configured.',
|
||||||
help=_("If False, use sysvinit.")),
|
help=_("If False, use sysvinit.")),
|
||||||
|
cfg.IntOpt('api_db_commit_retry_attempts', default=15,
|
||||||
|
help=_('The number of times the database action will be '
|
||||||
|
'attempted.')),
|
||||||
|
cfg.IntOpt('api_db_commit_retry_initial_delay', default=1,
|
||||||
|
help=_('The initial delay before a retry attempt.')),
|
||||||
|
cfg.IntOpt('api_db_commit_retry_backoff', default=1,
|
||||||
|
help=_('The time to backoff retry attempts.')),
|
||||||
|
cfg.IntOpt('api_db_commit_retry_max', default=5,
|
||||||
|
help=_('The maximum amount of time to wait between retry '
|
||||||
|
'attempts.')),
|
||||||
]
|
]
|
||||||
|
|
||||||
controller_worker_opts = [
|
controller_worker_opts = [
|
||||||
@ -462,7 +499,11 @@ controller_worker_opts = [
|
|||||||
help=_('If True, build cloud-init user-data that is passed '
|
help=_('If True, build cloud-init user-data that is passed '
|
||||||
'to the config drive on Amphora boot instead of '
|
'to the config drive on Amphora boot instead of '
|
||||||
'personality files. If False, utilize personality '
|
'personality files. If False, utilize personality '
|
||||||
'files.'))
|
'files.')),
|
||||||
|
cfg.IntOpt('amphora_delete_retries', default=5,
|
||||||
|
help=_('Number of times an amphora delete should be retried.')),
|
||||||
|
cfg.IntOpt('amphora_delete_retry_interval', default=5,
|
||||||
|
help=_('Time, in seconds, between amphora delete retries.')),
|
||||||
]
|
]
|
||||||
|
|
||||||
task_flow_opts = [
|
task_flow_opts = [
|
||||||
@ -790,6 +831,7 @@ driver_agent_opts = [
|
|||||||
cfg.CONF.register_opts(core_opts)
|
cfg.CONF.register_opts(core_opts)
|
||||||
cfg.CONF.register_opts(api_opts, group='api_settings')
|
cfg.CONF.register_opts(api_opts, group='api_settings')
|
||||||
cfg.CONF.register_opts(amphora_agent_opts, group='amphora_agent')
|
cfg.CONF.register_opts(amphora_agent_opts, group='amphora_agent')
|
||||||
|
cfg.CONF.register_opts(compute_opts, group='compute')
|
||||||
cfg.CONF.register_opts(networking_opts, group='networking')
|
cfg.CONF.register_opts(networking_opts, group='networking')
|
||||||
cfg.CONF.register_opts(oslo_messaging_opts, group='oslo_messaging')
|
cfg.CONF.register_opts(oslo_messaging_opts, group='oslo_messaging')
|
||||||
cfg.CONF.register_opts(haproxy_amphora_opts, group='haproxy_amphora')
|
cfg.CONF.register_opts(haproxy_amphora_opts, group='haproxy_amphora')
|
||||||
|
@@ -296,7 +296,9 @@ ACTIVE_CONNECTIONS = 'active_connections'
 ADD_NICS = 'add_nics'
 ADDED_PORTS = 'added_ports'
 ADMIN_STATE_UP = 'admin_state_up'
+ALLOWED_ADDRESS_PAIRS = 'allowed_address_pairs'
 AMP_DATA = 'amp_data'
+AMP_VRRP_INT = 'amp_vrrp_int'
 AMPHORA = 'amphora'
 AMPHORA_ID = 'amphora_id'
 AMPHORA_INDEX = 'amphora_index'
@@ -305,6 +307,8 @@ AMPHORAE = 'amphorae'
 AMPHORAE_NETWORK_CONFIG = 'amphorae_network_config'
 AMPS_DATA = 'amps_data'
 ANTI_AFFINITY = 'anti-affinity'
+ATTEMPT_NUMBER = 'attempt_number'
+BASE_PORT = 'base_port'
 BYTES_IN = 'bytes_in'
 BYTES_OUT = 'bytes_out'
 CACHED_ZONE = 'cached_zone'
@@ -324,7 +328,9 @@ DELETE_NICS = 'delete_nics'
 DELTA = 'delta'
 DELTAS = 'deltas'
 DESCRIPTION = 'description'
+DEVICE_OWNER = 'device_owner'
 ENABLED = 'enabled'
+FAILED_AMP_VRRP_PORT_ID = 'failed_amp_vrrp_port_id'
 FAILED_AMPHORA = 'failed_amphora'
 FAILOVER_AMPHORA = 'failover_amphora'
 FAILOVER_AMPHORA_ID = 'failover_amphora_id'
@@ -341,6 +347,7 @@ HEALTH_MONITOR_UPDATES = 'health_monitor_updates'
 ID = 'id'
 IMAGE_ID = 'image_id'
 IP_ADDRESS = 'ip_address'
+IPV6_ICMP = 'ipv6-icmp'
 LB_NETWORK_IP = 'lb_network_ip'
 L7POLICY = 'l7policy'
 L7POLICY_ID = 'l7policy_id'
@@ -360,6 +367,7 @@ MEMBER = 'member'
 MEMBER_ID = 'member_id'
 MEMBER_PORTS = 'member_ports'
 MEMBER_UPDATES = 'member_updates'
+MESSAGE = 'message'
 NAME = 'name'
 NETWORK = 'network'
 NETWORK_ID = 'network_id'
@@ -372,14 +380,16 @@ ORIGINAL_LISTENER = 'original_listener'
 ORIGINAL_LOADBALANCER = 'original_load_balancer'
 ORIGINAL_MEMBER = 'original_member'
 ORIGINAL_POOL = 'original_pool'
+PASSIVE_FAILURE = 'passive_failure'
 PEER_PORT = 'peer_port'
 POOL = 'pool'
 POOL_CHILD_COUNT = 'pool_child_count'
 POOL_ID = 'pool_id'
-PROJECT_ID = 'project_id'
 POOL_UPDATES = 'pool_updates'
+PORT = 'port'
 PORT_ID = 'port_id'
 PORTS = 'ports'
+PROJECT_ID = 'project_id'
 PROVIDER = 'provider'
 PROVIDER_NAME = 'provider_name'
 QOS_POLICY_ID = 'qos_policy_id'
@@ -388,15 +398,19 @@ REQ_CONN_TIMEOUT = 'req_conn_timeout'
 REQ_READ_TIMEOUT = 'req_read_timeout'
 REQUEST_ERRORS = 'request_errors'
 ROLE = 'role'
+SECURITY_GROUPS = 'security_groups'
+SECURITY_GROUP_RULES = 'security_group_rules'
 SERVER_GROUP_ID = 'server_group_id'
 SERVER_PEM = 'server_pem'
 SNI_CONTAINER_DATA = 'sni_container_data'
 SNI_CONTAINERS = 'sni_containers'
 SOFT_ANTI_AFFINITY = 'soft-anti-affinity'
 STATUS = 'status'
+STATUS_CODE = 'status_code'
 SUBNET = 'subnet'
 SUBNET_ID = 'subnet_id'
 TAGS = 'tags'
+TENANT_ID = 'tenant_id'
 TIMEOUT_DICT = 'timeout_dict'
 TLS_CERTIFICATE_ID = 'tls_certificate_id'
 TLS_CONTAINER_ID = 'tls_container_id'
|
|||||||
VIP_NETWORK = 'vip_network'
|
VIP_NETWORK = 'vip_network'
|
||||||
VIP_PORT_ID = 'vip_port_id'
|
VIP_PORT_ID = 'vip_port_id'
|
||||||
VIP_QOS_POLICY_ID = 'vip_qos_policy_id'
|
VIP_QOS_POLICY_ID = 'vip_qos_policy_id'
|
||||||
|
VIP_SG_ID = 'vip_sg_id'
|
||||||
VIP_SUBNET = 'vip_subnet'
|
VIP_SUBNET = 'vip_subnet'
|
||||||
VIP_SUBNET_ID = 'vip_subnet_id'
|
VIP_SUBNET_ID = 'vip_subnet_id'
|
||||||
VRRP_ID = 'vrrp_id'
|
VRRP_ID = 'vrrp_id'
|
||||||
@ -437,6 +452,7 @@ CREATE_POOL_FLOW = 'octavia-create-pool-flow'
|
|||||||
CREATE_L7POLICY_FLOW = 'octavia-create-l7policy-flow'
|
CREATE_L7POLICY_FLOW = 'octavia-create-l7policy-flow'
|
||||||
CREATE_L7RULE_FLOW = 'octavia-create-l7rule-flow'
|
CREATE_L7RULE_FLOW = 'octavia-create-l7rule-flow'
|
||||||
DELETE_AMPHORA_FLOW = 'octavia-delete-amphora-flow'
|
DELETE_AMPHORA_FLOW = 'octavia-delete-amphora-flow'
|
||||||
|
DELETE_EXTRA_AMPHORAE_FLOW = 'octavia-delete-extra-amphorae-flow'
|
||||||
DELETE_HEALTH_MONITOR_FLOW = 'octavia-delete-health-monitor-flow'
|
DELETE_HEALTH_MONITOR_FLOW = 'octavia-delete-health-monitor-flow'
|
||||||
DELETE_LISTENER_FLOW = 'octavia-delete-listener_flow'
|
DELETE_LISTENER_FLOW = 'octavia-delete-listener_flow'
|
||||||
DELETE_LOADBALANCER_FLOW = 'octavia-delete-loadbalancer-flow'
|
DELETE_LOADBALANCER_FLOW = 'octavia-delete-loadbalancer-flow'
|
||||||
@ -445,6 +461,7 @@ DELETE_POOL_FLOW = 'octavia-delete-pool-flow'
|
|||||||
DELETE_L7POLICY_FLOW = 'octavia-delete-l7policy-flow'
|
DELETE_L7POLICY_FLOW = 'octavia-delete-l7policy-flow'
|
||||||
DELETE_L7RULE_FLOW = 'octavia-delete-l7policy-flow'
|
DELETE_L7RULE_FLOW = 'octavia-delete-l7policy-flow'
|
||||||
FAILOVER_AMPHORA_FLOW = 'octavia-failover-amphora-flow'
|
FAILOVER_AMPHORA_FLOW = 'octavia-failover-amphora-flow'
|
||||||
|
FAILOVER_LOADBALANCER_FLOW = 'octavia-failover-loadbalancer-flow'
|
||||||
FINALIZE_AMPHORA_FLOW = 'octavia-finalize-amphora-flow'
|
FINALIZE_AMPHORA_FLOW = 'octavia-finalize-amphora-flow'
|
||||||
LOADBALANCER_NETWORKING_SUBFLOW = 'octavia-new-loadbalancer-net-subflow'
|
LOADBALANCER_NETWORKING_SUBFLOW = 'octavia-new-loadbalancer-net-subflow'
|
||||||
UPDATE_HEALTH_MONITOR_FLOW = 'octavia-update-health-monitor-flow'
|
UPDATE_HEALTH_MONITOR_FLOW = 'octavia-update-health-monitor-flow'
|
||||||
@ -459,10 +476,13 @@ UPDATE_AMPHORA_CONFIG_FLOW = 'octavia-update-amp-config-flow'
|
|||||||
|
|
||||||
POST_MAP_AMP_TO_LB_SUBFLOW = 'octavia-post-map-amp-to-lb-subflow'
|
POST_MAP_AMP_TO_LB_SUBFLOW = 'octavia-post-map-amp-to-lb-subflow'
|
||||||
CREATE_AMP_FOR_LB_SUBFLOW = 'octavia-create-amp-for-lb-subflow'
|
CREATE_AMP_FOR_LB_SUBFLOW = 'octavia-create-amp-for-lb-subflow'
|
||||||
|
CREATE_AMP_FOR_FAILOVER_SUBFLOW = 'octavia-create-amp-for-failover-subflow'
|
||||||
AMP_PLUG_NET_SUBFLOW = 'octavia-plug-net-subflow'
|
AMP_PLUG_NET_SUBFLOW = 'octavia-plug-net-subflow'
|
||||||
GET_AMPHORA_FOR_LB_SUBFLOW = 'octavia-get-amphora-for-lb-subflow'
|
GET_AMPHORA_FOR_LB_SUBFLOW = 'octavia-get-amphora-for-lb-subflow'
|
||||||
POST_LB_AMP_ASSOCIATION_SUBFLOW = (
|
POST_LB_AMP_ASSOCIATION_SUBFLOW = (
|
||||||
'octavia-post-loadbalancer-amp_association-subflow')
|
'octavia-post-loadbalancer-amp_association-subflow')
|
||||||
|
AMPHORA_LISTENER_START_SUBFLOW = 'amphora-listener-start-subflow'
|
||||||
|
AMPHORA_LISTENER_RELOAD_SUBFLOW = 'amphora-listener-start-subflow'
|
||||||
|
|
||||||
MAP_LOADBALANCER_TO_AMPHORA = 'octavia-mapload-balancer-to-amphora'
|
MAP_LOADBALANCER_TO_AMPHORA = 'octavia-mapload-balancer-to-amphora'
|
||||||
RELOAD_AMPHORA = 'octavia-reload-amphora'
|
RELOAD_AMPHORA = 'octavia-reload-amphora'
|
||||||
@ -478,7 +498,7 @@ COMPUTE_WAIT = 'octavia-compute-wait'
|
|||||||
UPDATE_AMPHORA_INFO = 'octavia-update-amphora-info'
|
UPDATE_AMPHORA_INFO = 'octavia-update-amphora-info'
|
||||||
AMPHORA_FINALIZE = 'octavia-amphora-finalize'
|
AMPHORA_FINALIZE = 'octavia-amphora-finalize'
|
||||||
MARK_AMPHORA_ALLOCATED_INDB = 'octavia-mark-amphora-allocated-indb'
|
MARK_AMPHORA_ALLOCATED_INDB = 'octavia-mark-amphora-allocated-indb'
|
||||||
RELOADLOAD_BALANCER = 'octavia-reloadload-balancer'
|
MARK_AMPHORA_READY_INDB = 'octavia-mark-amphora-ready-indb'
|
||||||
MARK_LB_ACTIVE_INDB = 'octavia-mark-lb-active-indb'
|
MARK_LB_ACTIVE_INDB = 'octavia-mark-lb-active-indb'
|
||||||
MARK_AMP_MASTER_INDB = 'octavia-mark-amp-master-indb'
|
MARK_AMP_MASTER_INDB = 'octavia-mark-amp-master-indb'
|
||||||
MARK_AMP_BACKUP_INDB = 'octavia-mark-amp-backup-indb'
|
MARK_AMP_BACKUP_INDB = 'octavia-mark-amp-backup-indb'
|
||||||
@ -492,6 +512,7 @@ CREATE_VRRP_GROUP_FOR_LB = 'octavia-create-vrrp-group-for-lb'
|
|||||||
CREATE_VRRP_SECURITY_RULES = 'octavia-create-vrrp-security-rules'
|
CREATE_VRRP_SECURITY_RULES = 'octavia-create-vrrp-security-rules'
|
||||||
AMP_COMPUTE_CONNECTIVITY_WAIT = 'octavia-amp-compute-connectivity-wait'
|
AMP_COMPUTE_CONNECTIVITY_WAIT = 'octavia-amp-compute-connectivity-wait'
|
||||||
AMP_LISTENER_UPDATE = 'octavia-amp-listeners-update'
|
AMP_LISTENER_UPDATE = 'octavia-amp-listeners-update'
|
||||||
|
AMP_LISTENER_START = 'octavia-amp-listeners-start'
|
||||||
PLUG_VIP_AMPHORA = 'octavia-amp-plug-vip'
|
PLUG_VIP_AMPHORA = 'octavia-amp-plug-vip'
|
||||||
APPLY_QOS_AMP = 'octavia-amp-apply-qos'
|
APPLY_QOS_AMP = 'octavia-amp-apply-qos'
|
||||||
UPDATE_AMPHORA_VIP_DATA = 'ocatvia-amp-update-vip-data'
|
UPDATE_AMPHORA_VIP_DATA = 'ocatvia-amp-update-vip-data'
|
||||||
@ -499,6 +520,8 @@ GET_AMP_NETWORK_CONFIG = 'octavia-amp-get-network-config'
|
|||||||
AMP_POST_VIP_PLUG = 'octavia-amp-post-vip-plug'
|
AMP_POST_VIP_PLUG = 'octavia-amp-post-vip-plug'
|
||||||
GENERATE_SERVER_PEM_TASK = 'GenerateServerPEMTask'
|
GENERATE_SERVER_PEM_TASK = 'GenerateServerPEMTask'
|
||||||
AMPHORA_CONFIG_UPDATE_TASK = 'AmphoraConfigUpdateTask'
|
AMPHORA_CONFIG_UPDATE_TASK = 'AmphoraConfigUpdateTask'
|
||||||
|
FIRST_AMP_NETWORK_CONFIGS = 'first-amp-network-configs'
|
||||||
|
FIRST_AMP_VRRP_INTERFACE = 'first-amp-vrrp_interface'
|
||||||
|
|
||||||
# Batch Member Update constants
|
# Batch Member Update constants
|
||||||
UNORDERED_MEMBER_UPDATES_FLOW = 'octavia-unordered-member-updates-flow'
|
UNORDERED_MEMBER_UPDATES_FLOW = 'octavia-unordered-member-updates-flow'
|
||||||
@ -513,11 +536,30 @@ UPDATE_MEMBER_INDB = 'octavia-update-member-indb'
|
|||||||
DELETE_MEMBER_INDB = 'octavia-delete-member-indb'
|
DELETE_MEMBER_INDB = 'octavia-delete-member-indb'
|
||||||
|
|
||||||
# Task Names
|
# Task Names
|
||||||
|
ADMIN_DOWN_PORT = 'admin-down-port'
|
||||||
|
AMPHORA_POST_VIP_PLUG = 'amphora-post-vip-plug'
|
||||||
|
AMPHORA_RELOAD_LISTENER = 'amphora-reload-listener'
|
||||||
|
AMPHORA_TO_ERROR_ON_REVERT = 'amphora-to-error-on-revert'
|
||||||
|
AMPHORAE_POST_NETWORK_PLUG = 'amphorae-post-network-plug'
|
||||||
|
ATTACH_PORT = 'attach-port'
|
||||||
|
CALCULATE_AMPHORA_DELTA = 'calculate-amphora-delta'
|
||||||
|
CREATE_VIP_BASE_PORT = 'create-vip-base-port'
|
||||||
|
DELETE_AMPHORA = 'delete-amphora'
|
||||||
|
DELETE_PORT = 'delete-port'
|
||||||
|
DISABLE_AMP_HEALTH_MONITORING = 'disable-amphora-health-monitoring'
|
||||||
|
GET_AMPHORA_NETWORK_CONFIGS_BY_ID = 'get-amphora-network-configs-by-id'
|
||||||
|
GET_AMPHORAE_FROM_LB = 'get-amphorae-from-lb'
|
||||||
|
HANDLE_NETWORK_DELTA = 'handle-network-delta'
|
||||||
|
MARK_AMPHORA_DELETED = 'mark-amphora-deleted'
|
||||||
|
MARK_AMPHORA_PENDING_DELETE = 'mark-amphora-pending-delete'
|
||||||
|
MARK_AMPHORA_HEALTH_BUSY = 'mark-amphora-health-busy'
|
||||||
RELOAD_AMP_AFTER_PLUG_VIP = 'reload-amp-after-plug-vip'
|
RELOAD_AMP_AFTER_PLUG_VIP = 'reload-amp-after-plug-vip'
|
||||||
RELOAD_LB_AFTER_AMP_ASSOC = 'reload-lb-after-amp-assoc'
|
RELOAD_LB_AFTER_AMP_ASSOC = 'reload-lb-after-amp-assoc'
|
||||||
RELOAD_LB_AFTER_AMP_ASSOC_FULL_GRAPH = 'reload-lb-after-amp-assoc-full-graph'
|
RELOAD_LB_AFTER_AMP_ASSOC_FULL_GRAPH = 'reload-lb-after-amp-assoc-full-graph'
|
||||||
RELOAD_LB_AFTER_PLUG_VIP = 'reload-lb-after-plug-vip'
|
RELOAD_LB_AFTER_PLUG_VIP = 'reload-lb-after-plug-vip'
|
||||||
RELOAD_LB_BEFOR_ALLOCATE_VIP = "reload-lb-before-allocate-vip"
|
RELOAD_LB_BEFOR_ALLOCATE_VIP = 'reload-lb-before-allocate-vip'
|
||||||
|
UPDATE_AMP_FAILOVER_DETAILS = 'update-amp-failover-details'
|
||||||
|
|
||||||
|
|
||||||
NOVA_1 = '1.1'
|
NOVA_1 = '1.1'
|
||||||
NOVA_21 = '2.1'
|
NOVA_21 = '2.1'
|
||||||
@ -785,6 +827,7 @@ CIPHERS_OWASP_SUITE_B = ('TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:'
|
|||||||
'ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-SHA256:'
|
'ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-SHA256:'
|
||||||
'DHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:'
|
'DHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:'
|
||||||
'ECDHE-RSA-AES128-SHA256')
|
'ECDHE-RSA-AES128-SHA256')
|
||||||
|
|
||||||
TLS_VERSIONS_OWASP_SUITE_B = [lib_consts.TLS_VERSION_1_2,
|
TLS_VERSIONS_OWASP_SUITE_B = [lib_consts.TLS_VERSION_1_2,
|
||||||
lib_consts.TLS_VERSION_1_3]
|
lib_consts.TLS_VERSION_1_3]
|
||||||
|
|
||||||
@ -796,3 +839,8 @@ TLS_ALL_VERSIONS = [
|
|||||||
lib_consts.TLS_VERSION_1_2,
|
lib_consts.TLS_VERSION_1_2,
|
||||||
lib_consts.TLS_VERSION_1_3
|
lib_consts.TLS_VERSION_1_3
|
||||||
]
|
]
|
||||||
|
|
||||||
|
VIP_SECURITY_GROUP_PREFIX = 'lb-'
|
||||||
|
|
||||||
|
AMP_BASE_PORT_PREFIX = 'octavia-lb-vrrp-'
|
||||||
|
OCTAVIA_OWNED = 'octavia_owned'
|
||||||
|
@ -202,7 +202,8 @@ class ComputeBuildQueueTimeoutException(OctaviaException):
|
|||||||
|
|
||||||
|
|
||||||
class ComputeDeleteException(OctaviaException):
|
class ComputeDeleteException(OctaviaException):
|
||||||
message = _('Failed to delete compute instance.')
|
message = _('Failed to delete compute instance. The compute service '
|
||||||
|
'reports: %(compute_msg)s')
|
||||||
|
|
||||||
|
|
||||||
class ComputeGetException(OctaviaException):
|
class ComputeGetException(OctaviaException):
|
||||||
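
Illustrative sketch, not patch code: the %(compute_msg)s placeholder is filled from the keyword argument passed when the exception is raised (the compute driver change below raises ComputeDeleteException(compute_msg=str(e))). The same %-style mapping interpolation in plain Python:

    message = ('Failed to delete compute instance. The compute service '
               'reports: %(compute_msg)s')
    print(message % {'compute_msg': 'No server with a name or ID of X exists.'})
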
@@ -243,6 +244,14 @@ class ComputeWaitTimeoutException(OctaviaException):
     message = _('Waiting for compute id %(id)s to go active timeout.')
 
 
+class ComputePortInUseException(OctaviaException):
+    message = _('Compute driver reports port %(port)s is already in use.')
+
+
+class ComputeUnknownException(OctaviaException):
+    message = _('Unknown exception from the compute driver: %(exc)s.')
+
+
 class InvalidTopology(OctaviaException):
     message = _('Invalid topology specified: %(topology)s')
 
@@ -396,3 +405,12 @@ class VolumeDeleteException(OctaviaException):
 
 class VolumeGetException(OctaviaException):
     message = _('Failed to retrieve volume instance.')
+
+
+class NetworkServiceError(OctaviaException):
+    message = _('The networking service had a failure: %(net_error)s')
+
+
+class InvalidIPAddress(APIException):
+    msg = _('The IP Address %(ip_addr)s is invalid.')
+    code = 400
@@ -29,6 +29,8 @@ from oslo_log import log as logging
 from oslo_utils import excutils
 from stevedore import driver as stevedore_driver
 
+from octavia.common import constants
+
 CONF = cfg.CONF
 
 LOG = logging.getLogger(__name__)
@@ -50,6 +52,15 @@ def base64_sha1_string(string_to_hash):
     return re.sub(r"^-", "x", b64_sha1)
 
 
+def get_amphora_driver():
+    amphora_driver = stevedore_driver.DriverManager(
+        namespace='octavia.amphora.drivers',
+        name=CONF.controller_worker.amphora_driver,
+        invoke_on_load=True
+    ).driver
+    return amphora_driver
+
+
 def get_network_driver():
     CONF.import_group('controller_worker', 'octavia.common.config')
     network_driver = stevedore_driver.DriverManager(
|
|||||||
return network_driver
|
return network_driver
|
||||||
|
|
||||||
|
|
||||||
|
def is_ipv4(ip_address):
|
||||||
|
"""Check if ip address is IPv4 address."""
|
||||||
|
ip = netaddr.IPAddress(ip_address)
|
||||||
|
return ip.version == 4
|
||||||
|
|
||||||
|
|
||||||
def is_ipv6(ip_address):
|
def is_ipv6(ip_address):
|
||||||
"""Check if ip address is IPv6 address."""
|
"""Check if ip address is IPv6 address."""
|
||||||
ip = netaddr.IPAddress(ip_address)
|
ip = netaddr.IPAddress(ip_address)
|
||||||
@ -99,6 +116,12 @@ def ip_netmask_to_cidr(ip, netmask):
|
|||||||
return "{ip}/{netmask}".format(ip=net.network, netmask=net.prefixlen)
|
return "{ip}/{netmask}".format(ip=net.network, netmask=net.prefixlen)
|
||||||
|
|
||||||
|
|
||||||
|
def get_vip_security_group_name(loadbalancer_id):
|
||||||
|
if loadbalancer_id:
|
||||||
|
return constants.VIP_SECURITY_GROUP_PREFIX + loadbalancer_id
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
def get_compatible_value(value):
|
def get_compatible_value(value):
|
||||||
if isinstance(value, str):
|
if isinstance(value, str):
|
||||||
value = value.encode('utf-8')
|
value = value.encode('utf-8')
|
||||||
|
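
Illustrative sketch, not patch code (the UUID below is made up): with VIP_SECURITY_GROUP_PREFIX = 'lb-' from the constants added above, the helper simply derives the VIP security group name from the load balancer ID:

    VIP_SECURITY_GROUP_PREFIX = 'lb-'

    def get_vip_security_group_name(loadbalancer_id):
        if loadbalancer_id:
            return VIP_SECURITY_GROUP_PREFIX + loadbalancer_id
        return None

    print(get_vip_security_group_name('803ddb51-aaaa-bbbb-cccc-123456789abc'))
    # -> lb-803ddb51-aaaa-bbbb-cccc-123456789abc
    print(get_vip_security_group_name(None))  # -> None
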
@@ -84,8 +84,8 @@ class NoopManager(object):
                   self.__class__.__name__, server_group_id)
         self.computeconfig[server_group_id] = (server_group_id, 'delete')
 
-    def attach_network_or_port(self, compute_id, network_id, ip_address=None,
-                               port_id=None):
+    def attach_network_or_port(self, compute_id, network_id=None,
+                               ip_address=None, port_id=None):
         LOG.debug("Compute %s no-op, attach_network_or_port compute_id %s,"
                   "network_id %s, ip_address %s, port_id %s",
                   self.__class__.__name__, compute_id,
@@ -153,8 +153,8 @@ class NoopComputeDriver(driver_base.ComputeBase):
     def delete_server_group(self, server_group_id):
         self.driver.delete_server_group(server_group_id)
 
-    def attach_network_or_port(self, compute_id, network_id, ip_address=None,
-                               port_id=None):
+    def attach_network_or_port(self, compute_id, network_id=None,
+                               ip_address=None, port_id=None):
         self.driver.attach_network_or_port(compute_id, network_id, ip_address,
                                            port_id)
 
@@ -199,9 +199,9 @@ class VirtualMachineManager(compute_base.ComputeBase):
         except nova_exceptions.NotFound:
             LOG.warning("Nova instance with id: %s not found. "
                         "Assuming already deleted.", compute_id)
-        except Exception:
+        except Exception as e:
             LOG.exception("Error deleting nova virtual machine.")
-            raise exceptions.ComputeDeleteException()
+            raise exceptions.ComputeDeleteException(compute_msg=str(e))
 
     def status(self, compute_id):
         '''Retrieve the status of a virtual machine.
@@ -339,8 +339,8 @@ class VirtualMachineManager(compute_base.ComputeBase):
             LOG.exception("Error delete server group instance.")
             raise exceptions.ServerGroupObjectDeleteException()
 
-    def attach_network_or_port(self, compute_id, network_id, ip_address=None,
-                               port_id=None):
+    def attach_network_or_port(self, compute_id, network_id=None,
+                               ip_address=None, port_id=None):
         """Attaching a port or a network to an existing amphora
 
         :param compute_id: id of an amphora in the compute service
@@ -348,13 +348,39 @@ class VirtualMachineManager(compute_base.ComputeBase):
         :param ip_address: ip address to attempt to be assigned to interface
         :param port_id: id of the neutron port
         :return: nova interface instance
-        :raises: Exception
+        :raises ComputePortInUseException: The port is in use somewhere else
+        :raises ComputeUnknownException: Unknown nova error
         """
         try:
             interface = self.manager.interface_attach(
                 server=compute_id, net_id=network_id, fixed_ip=ip_address,
                 port_id=port_id)
-        except Exception:
+        except nova_exceptions.Conflict as e:
+            # The port is already in use.
+            if port_id:
+                # Check if the port we want is already attached
+                try:
+                    interfaces = self.manager.interface_list(compute_id)
+                    for interface in interfaces:
+                        if interface.id == port_id:
+                            return interface
+                except Exception as e:
+                    raise exceptions.ComputeUnknownException(exc=str(e))
+
+                raise exceptions.ComputePortInUseException(port=port_id)
+
+            # Nova should have created the port, so something is really
+            # wrong in nova if we get here.
+            raise exceptions.ComputeUnknownException(exc=str(e))
+        except nova_exceptions.NotFound as e:
+            if 'Instance' in str(e):
+                raise exceptions.NotFound(resource='Instance', id=compute_id)
+            if 'Network' in str(e):
+                raise exceptions.NotFound(resource='Network', id=network_id)
+            if 'Port' in str(e):
+                raise exceptions.NotFound(resource='Port', id=port_id)
+            raise exceptions.NotFound(resource=str(e), id=compute_id)
+        except Exception as e:
             LOG.error('Error attaching network %(network_id)s with ip '
                       '%(ip_address)s and port %(port)s to amphora '
                       '(compute_id: %(compute_id)s) ',
@@ -364,7 +390,7 @@ class VirtualMachineManager(compute_base.ComputeBase):
                        'ip_address': ip_address,
                        'port': port_id
                       })
-            raise
+            raise exceptions.ComputeUnknownException(exc=str(e))
         return interface
 
     def detach_port(self, compute_id, port_id):
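
Illustrative sketch, not patch code (stub callables stand in for the nova client): the Conflict branch above makes attach_network_or_port() safe to retry. If nova reports the port in use, the driver first checks whether it is in use by this very instance and, if so, returns the existing interface instead of failing:

    class PortInUse(Exception):
        pass

    def attach_port_idempotent(interface_attach, interface_list, compute_id,
                               port_id):
        """Attach a port, tolerating 'already attached to this server'."""
        try:
            return interface_attach(server=compute_id, port_id=port_id)
        except PortInUse:
            # In use -- but maybe by us, e.g. a retried failover task.
            for iface in interface_list(compute_id):
                if iface.id == port_id:
                    return iface  # already attached here: success
            raise  # genuinely attached elsewhere
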
@@ -23,6 +23,8 @@ import tenacity
 
 from octavia.common import base_taskflow
 from octavia.common import constants
+from octavia.common import exceptions
+from octavia.common import utils
 from octavia.controller.worker.v1.flows import amphora_flows
 from octavia.controller.worker.v1.flows import health_monitor_flows
 from octavia.controller.worker.v1.flows import l7policy_flows
@@ -37,11 +39,6 @@ from octavia.db import repositories as repo
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
-RETRY_ATTEMPTS = 15
-RETRY_INITIAL_DELAY = 1
-RETRY_BACKOFF = 1
-RETRY_MAX = 5
-
 
 def _is_provisioning_status_pending_update(lb_obj):
     return not lb_obj.provisioning_status == constants.PENDING_UPDATE
@@ -79,8 +76,11 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
             tenacity.retry_if_result(_is_provisioning_status_pending_update) |
             tenacity.retry_if_exception_type()),
         wait=tenacity.wait_incrementing(
-            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
-        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
     def _get_db_obj_until_pending_update(self, repo, id):
 
         return repo.get(db_apis.get_session(), id=id)
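
Illustrative sketch, not patch code (the wait formula is assumed from the option names: start plus a per-attempt increment, capped): with the defaults added above (initial_delay=1, backoff=1, retry_max=5, attempts=15), an incrementing wait schedule produces 1, 2, 3, 4, 5, 5, ... seconds between retries:

    def incrementing_waits(start=1, increment=1, maximum=5, attempts=15):
        # Wait before retry n (0-based), capped at `maximum` seconds.
        return [min(start + n * increment, maximum)
                for n in range(attempts - 1)]

    print(incrementing_waits())
    # [1, 2, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]
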
@@ -96,6 +96,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
         store = {constants.BUILD_TYPE_PRIORITY:
                  constants.LB_CREATE_SPARES_POOL_PRIORITY,
                  constants.FLAVOR: None,
+                 constants.SERVER_GROUP_ID: None,
                  constants.AVAILABILITY_ZONE: None}
         if availability_zone:
             store[constants.AVAILABILITY_ZONE] = (
@@ -111,27 +112,14 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
         except Exception as e:
             LOG.error('Failed to create an amphora due to: {}'.format(str(e)))
 
-    def delete_amphora(self, amphora_id):
-        """Deletes an existing Amphora.
-
-        :param amphora_id: ID of the amphora to delete
-        :returns: None
-        :raises AmphoraNotFound: The referenced Amphora was not found
-        """
-        amphora = self._amphora_repo.get(db_apis.get_session(),
-                                         id=amphora_id)
-        delete_amp_tf = self._taskflow_load(self._amphora_flows.
-                                            get_delete_amphora_flow(),
-                                            store={constants.AMPHORA: amphora})
-        with tf_logging.DynamicLoggingListener(delete_amp_tf,
-                                               log=LOG):
-            delete_amp_tf.run()
-
     @tenacity.retry(
         retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
         wait=tenacity.wait_incrementing(
-            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
-        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
     def create_health_monitor(self, health_monitor_id):
         """Creates a health monitor.
 
@@ -224,8 +212,11 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
     @tenacity.retry(
         retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
         wait=tenacity.wait_incrementing(
-            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
-        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
     def create_listener(self, listener_id):
         """Creates a listener.
 
@@ -310,8 +301,11 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
     @tenacity.retry(
         retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
         wait=tenacity.wait_incrementing(
-            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
-        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
     def create_load_balancer(self, load_balancer_id, flavor=None,
                              availability_zone=None):
         """Creates a load balancer by allocating Amphorae.
@@ -338,6 +332,9 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
                  constants.FLAVOR: flavor,
                  constants.AVAILABILITY_ZONE: availability_zone}
 
+        if not CONF.nova.enable_anti_affinity:
+            store[constants.SERVER_GROUP_ID] = None
+
         topology = lb.topology
 
         store[constants.UPDATE_DICT] = {
@@ -411,8 +408,11 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
     @tenacity.retry(
         retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
         wait=tenacity.wait_incrementing(
-            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
-        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
     def create_member(self, member_id):
         """Creates a pool member.
 
@@ -486,8 +486,11 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
     @tenacity.retry(
         retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
         wait=tenacity.wait_incrementing(
-            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
-        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
     def batch_update_members(self, old_member_ids, new_member_ids,
                              updated_members):
         new_members = [self._member_repo.get(db_apis.get_session(), id=mid)
@@ -577,8 +580,11 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
     @tenacity.retry(
         retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
         wait=tenacity.wait_incrementing(
-            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
-        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
     def create_pool(self, pool_id):
         """Creates a node pool.
 
@@ -667,8 +673,11 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
     @tenacity.retry(
         retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(
-            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
-        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
     def create_l7policy(self, l7policy_id):
         """Creates an L7 Policy.
 
@@ -753,8 +762,11 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
     @tenacity.retry(
         retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
         wait=tenacity.wait_incrementing(
-            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
-        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
     def create_l7rule(self, l7rule_id):
         """Creates an L7 Rule.
 
@@ -841,155 +853,248 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
                                                log=LOG):
             update_l7rule_tf.run()
 
-    def _perform_amphora_failover(self, amp, priority):
-        """Internal method to perform failover operations for an amphora.
-
-        :param amp: The amphora to failover
-        :param priority: The create priority
-        :returns: None
-        """
-
-        stored_params = {constants.FAILED_AMPHORA: amp,
-                         constants.LOADBALANCER_ID: amp.load_balancer_id,
-                         constants.BUILD_TYPE_PRIORITY: priority, }
-
-        if amp.role in (constants.ROLE_MASTER, constants.ROLE_BACKUP):
-            amp_role = 'master_or_backup'
-        elif amp.role == constants.ROLE_STANDALONE:
-            amp_role = 'standalone'
-        elif amp.role is None:
-            amp_role = 'spare'
-        else:
-            amp_role = 'undefined'
-
-        LOG.info("Perform failover for an amphora: %s",
-                 {"id": amp.id,
-                  "load_balancer_id": amp.load_balancer_id,
-                  "lb_network_ip": amp.lb_network_ip,
-                  "compute_id": amp.compute_id,
-                  "role": amp_role})
-
-        if amp.status == constants.DELETED:
-            LOG.warning('Amphora %s is marked DELETED in the database but '
-                        'was submitted for failover. Deleting it from the '
-                        'amphora health table to exclude it from health '
-                        'checks and skipping the failover.', amp.id)
-            self._amphora_health_repo.delete(db_apis.get_session(),
-                                             amphora_id=amp.id)
-            return
-
-        if (CONF.house_keeping.spare_amphora_pool_size == 0) and (
-                CONF.nova.enable_anti_affinity is False):
-            LOG.warning("Failing over amphora with no spares pool may "
-                        "cause delays in failover times while a new "
-                        "amphora instance boots.")
-
-        # if we run with anti-affinity we need to set the server group
-        # as well
-        lb = self._amphora_repo.get_lb_for_amphora(
-            db_apis.get_session(), amp.id)
-        if CONF.nova.enable_anti_affinity and lb:
-            stored_params[constants.SERVER_GROUP_ID] = lb.server_group_id
-        if lb and lb.flavor_id:
-            stored_params[constants.FLAVOR] = (
-                self._flavor_repo.get_flavor_metadata_dict(
-                    db_apis.get_session(), lb.flavor_id))
-        else:
-            stored_params[constants.FLAVOR] = {}
-        if lb and lb.availability_zone:
-            stored_params[constants.AVAILABILITY_ZONE] = (
-                self._az_repo.get_availability_zone_metadata_dict(
-                    db_apis.get_session(), lb.availability_zone))
-        else:
-            stored_params[constants.AVAILABILITY_ZONE] = {}
-
-        failover_amphora_tf = self._taskflow_load(
-            self._amphora_flows.get_failover_flow(
-                role=amp.role, load_balancer=lb),
-            store=stored_params)
-
-        with tf_logging.DynamicLoggingListener(failover_amphora_tf, log=LOG):
-            failover_amphora_tf.run()
-
-        LOG.info("Successfully completed the failover for an amphora: %s",
-                 {"id": amp.id,
-                  "load_balancer_id": amp.load_balancer_id,
-                  "lb_network_ip": amp.lb_network_ip,
-                  "compute_id": amp.compute_id,
-                  "role": amp_role})
-
     def failover_amphora(self, amphora_id):
         """Perform failover operations for an amphora.
 
+        Note: This expects the load balancer to already be in
+        provisioning_status=PENDING_UPDATE state.
+
         :param amphora_id: ID for amphora to failover
         :returns: None
-        :raises AmphoraNotFound: The referenced amphora was not found
+        :raises octavia.common.exceptions.NotFound: The referenced amphora was
+                                                    not found
         """
+        amphora = None
         try:
-            amp = self._amphora_repo.get(db_apis.get_session(),
-                                         id=amphora_id)
-            if not amp:
-                LOG.warning("Could not fetch Amphora %s from DB, ignoring "
-                            "failover request.", amphora_id)
+            amphora = self._amphora_repo.get(db_apis.get_session(),
+                                             id=amphora_id)
+            if amphora is None:
+                LOG.error('Amphora failover for amphora %s failed because '
+                          'there is no record of this amphora in the '
+                          'database. Check that the [house_keeping] '
+                          'amphora_expiry_age configuration setting is not '
+                          'too short. Skipping failover.', amphora_id)
+                raise exceptions.NotFound(resource=constants.AMPHORA,
+                                          id=amphora_id)
+
+            if amphora.status == constants.DELETED:
+                LOG.warning('Amphora %s is marked DELETED in the database but '
+                            'was submitted for failover. Deleting it from the '
+                            'amphora health table to exclude it from health '
+                            'checks and skipping the failover.', amphora.id)
+                self._amphora_health_repo.delete(db_apis.get_session(),
+                                                 amphora_id=amphora.id)
                 return
-            self._perform_amphora_failover(
-                amp, constants.LB_CREATE_FAILOVER_PRIORITY)
-            if amp.load_balancer_id:
-                LOG.info("Mark ACTIVE in DB for load balancer id: %s",
-                         amp.load_balancer_id)
-                self._lb_repo.update(
-                    db_apis.get_session(), amp.load_balancer_id,
-                    provisioning_status=constants.ACTIVE)
+
+            loadbalancer = None
+            if amphora.load_balancer_id:
+                loadbalancer = self._lb_repo.get(db_apis.get_session(),
+                                                 id=amphora.load_balancer_id)
+            lb_amp_count = None
+            if loadbalancer:
+                if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
+                    lb_amp_count = 2
+                elif loadbalancer.topology == constants.TOPOLOGY_SINGLE:
+                    lb_amp_count = 1
+
+            amp_failover_flow = self._amphora_flows.get_failover_amphora_flow(
+                amphora, lb_amp_count)
+
+            az_metadata = {}
+            flavor = {}
+            lb_id = None
+            vip = None
+            server_group_id = None
+            if loadbalancer:
+                lb_id = loadbalancer.id
+                if loadbalancer.flavor_id:
+                    flavor = self._flavor_repo.get_flavor_metadata_dict(
+                        db_apis.get_session(), loadbalancer.flavor_id)
+                    flavor[constants.LOADBALANCER_TOPOLOGY] = (
+                        loadbalancer.topology)
+                else:
+                    flavor = {constants.LOADBALANCER_TOPOLOGY:
+                              loadbalancer.topology}
+                if loadbalancer.availability_zone:
+                    az_metadata = (
+                        self._az_repo.get_availability_zone_metadata_dict(
+                            db_apis.get_session(),
+                            loadbalancer.availability_zone))
+                vip = loadbalancer.vip
+                server_group_id = loadbalancer.server_group_id
+
+            stored_params = {constants.AVAILABILITY_ZONE: az_metadata,
+                             constants.BUILD_TYPE_PRIORITY:
+                                 constants.LB_CREATE_FAILOVER_PRIORITY,
+                             constants.FLAVOR: flavor,
+                             constants.LOADBALANCER: loadbalancer,
+                             constants.SERVER_GROUP_ID: server_group_id,
+                             constants.LOADBALANCER_ID: lb_id,
+                             constants.VIP: vip}
+
+            failover_amphora_tf = self._taskflow_load(amp_failover_flow,
+                                                      store=stored_params)
+
+            with tf_logging.DynamicLoggingListener(failover_amphora_tf,
+                                                   log=LOG):
+                failover_amphora_tf.run()
+
+            LOG.info("Successfully completed the failover for an amphora: %s",
+                     {"id": amphora_id,
+                      "load_balancer_id": lb_id,
+                      "lb_network_ip": amphora.lb_network_ip,
+                      "compute_id": amphora.compute_id,
+                      "role": amphora.role})
+
         except Exception as e:
-            try:
-                self._lb_repo.update(
-                    db_apis.get_session(), amp.load_balancer_id,
-                    provisioning_status=constants.ERROR)
-            except Exception:
-                LOG.error("Unable to revert LB status to ERROR.")
-            with excutils.save_and_reraise_exception():
-                LOG.error("Amphora %(id)s failover exception: %(exc)s",
-                          {'id': amphora_id, 'exc': e})
+            with excutils.save_and_reraise_exception(reraise=False):
+                LOG.exception("Amphora %s failover exception: %s",
+                              amphora_id, str(e))
+                self._amphora_repo.update(db_apis.get_session(),
+                                          amphora_id, status=constants.ERROR)
+                if amphora and amphora.load_balancer_id:
+                    self._lb_repo.update(
+                        db_apis.get_session(), amphora.load_balancer_id,
+                        provisioning_status=constants.ERROR)
 
+
+    @staticmethod
+    def _get_amphorae_for_failover(load_balancer):
+        """Returns an ordered list of amphora to failover.
+
+        :param load_balancer: The load balancer being failed over.
+        :returns: An ordered list of amphora to failover,
+                  first amp to failover is last in the list
+        :raises octavia.common.exceptions.InvalidTopology: LB has an unknown
+                topology.
+        """
+        if load_balancer.topology == constants.TOPOLOGY_SINGLE:
+            # In SINGLE topology, amp failover order does not matter
+            return [a for a in load_balancer.amphorae
+                    if a.status != constants.DELETED]
+
+        if load_balancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
+            # In Active/Standby we should preference the standby amp
+            # for failover first in case the Active is still able to pass
+            # traffic.
+            # Note: The active amp can switch at any time and in less than a
+            #       second, so this is "best effort".
+            amphora_driver = utils.get_amphora_driver()
+            timeout_dict = {
+                constants.CONN_MAX_RETRIES:
+                    CONF.haproxy_amphora.failover_connection_max_retries,
+                constants.CONN_RETRY_INTERVAL:
+                    CONF.haproxy_amphora.failover_connection_retry_interval}
+            amps = []
+            selected_amp = None
+            for amp in load_balancer.amphorae:
+                if amp.status == constants.DELETED:
+                    continue
+                if selected_amp is None:
+                    try:
+                        if amphora_driver.get_interface_from_ip(
+                                amp, load_balancer.vip.ip_address,
+                                timeout_dict):
+                            # This is a potential ACTIVE, add it to the list
+                            amps.append(amp)
+                        else:
+                            # This one doesn't have the VIP IP, so start
+                            # failovers here.
+                            selected_amp = amp
+                            LOG.debug("Selected amphora %s as the initial "
+                                      "failover amphora.", amp.id)
+                    except Exception:
+                        # This amphora is broken, so start failovers here.
+                        selected_amp = amp
+                else:
+                    # We have already found a STANDBY, so add the rest to the
+                    # list without querying them.
+                    amps.append(amp)
+            # Put the selected amphora at the end of the list so it is
+            # first to failover.
+            if selected_amp:
+                amps.append(selected_amp)
+            return amps
+
+        LOG.error('Unknown load balancer topology found: %s, aborting '
+                  'failover.', load_balancer.topology)
+        raise exceptions.InvalidTopology(topology=load_balancer.topology)
+
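The ordering contract is easiest to see with stub objects. This standalone sketch (stubs only, not Octavia's driver probe) mirrors the logic above: the amphora that does not answer for the VIP is appended last, and the failover loop works from the back of the list, so it is failed over first.

    class StubAmp(object):
        def __init__(self, amp_id, has_vip):
            self.id = amp_id
            self.has_vip = has_vip


    def order_for_failover(amps, vip_probe):
        rest = []
        selected = None
        for amp in amps:
            if selected is None and not vip_probe(amp):
                selected = amp    # likely STANDBY (or broken): fail over first
            else:
                rest.append(amp)  # potential ACTIVE: defer
        return rest + ([selected] if selected else [])


    amps = [StubAmp('amp-a', True), StubAmp('amp-b', False)]
    print([a.id for a in order_for_failover(amps, lambda a: a.has_vip)])
    # ['amp-a', 'amp-b'] -> amp-b lacks the VIP, so it lands last in the
    # list and is therefore the first amphora to be failed over.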
     def failover_loadbalancer(self, load_balancer_id):
         """Perform failover operations for a load balancer.
 
+        Note: This expects the load balancer to already be in
+        provisioning_status=PENDING_UPDATE state.
+
         :param load_balancer_id: ID for load balancer to failover
         :returns: None
-        :raises LBNotFound: The referenced load balancer was not found
+        :raises octavia.common.exceptions.NotFound: The load balancer was not
+                found.
         """
-        # Note: This expects that the load balancer is already in
-        #       provisioning_status=PENDING_UPDATE state
         try:
             lb = self._lb_repo.get(db_apis.get_session(),
                                    id=load_balancer_id)
+            if lb is None:
+                raise exceptions.NotFound(resource=constants.LOADBALANCER,
+                                          id=load_balancer_id)
+
-            # Exclude amphora already deleted
-            amps = [a for a in lb.amphorae if a.status != constants.DELETED]
-            for amp in amps:
-                # failover amphora in backup role
-                # Note: this amp may not currently be the backup
-                # TODO(johnsom) Change this to query the amp state
-                #               once the amp API supports it.
-                if amp.role == constants.ROLE_BACKUP:
-                    self._perform_amphora_failover(
-                        amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)
-
-            for amp in amps:
-                # failover everyhting else
-                if amp.role != constants.ROLE_BACKUP:
-                    self._perform_amphora_failover(
-                        amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)
-
-            self._lb_repo.update(
-                db_apis.get_session(), load_balancer_id,
-                provisioning_status=constants.ACTIVE)
+            # Get the ordered list of amphorae to failover for this LB.
+            amps = self._get_amphorae_for_failover(lb)
+
+            if lb.topology == constants.TOPOLOGY_SINGLE:
+                if len(amps) != 1:
+                    LOG.warning('%d amphorae found on load balancer %s where '
+                                'one should exist. Repairing.', len(amps),
+                                load_balancer_id)
+            elif lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
+                if len(amps) != 2:
+                    LOG.warning('%d amphorae found on load balancer %s where '
+                                'two should exist. Repairing.', len(amps),
+                                load_balancer_id)
+            else:
+                LOG.error('Unknown load balancer topology found: %s, aborting '
+                          'failover!', lb.topology)
+                raise exceptions.InvalidTopology(topology=lb.topology)
+
+            # Build our failover flow.
+            lb_failover_flow = self._lb_flows.get_failover_LB_flow(amps, lb)
+
+            # We must provide a topology in the flavor definition
+            # here for the amphora to be created with the correct
+            # configuration.
+            if lb.flavor_id:
+                flavor = self._flavor_repo.get_flavor_metadata_dict(
+                    db_apis.get_session(), lb.flavor_id)
+                flavor[constants.LOADBALANCER_TOPOLOGY] = lb.topology
+            else:
+                flavor = {constants.LOADBALANCER_TOPOLOGY: lb.topology}
+
+            stored_params = {constants.LOADBALANCER: lb,
+                             constants.BUILD_TYPE_PRIORITY:
+                                 constants.LB_CREATE_FAILOVER_PRIORITY,
+                             constants.SERVER_GROUP_ID: lb.server_group_id,
+                             constants.LOADBALANCER_ID: lb.id,
+                             constants.FLAVOR: flavor}
+
+            if lb.availability_zone:
+                stored_params[constants.AVAILABILITY_ZONE] = (
+                    self._az_repo.get_availability_zone_metadata_dict(
+                        db_apis.get_session(), lb.availability_zone))
+            else:
+                stored_params[constants.AVAILABILITY_ZONE] = {}
+
+            failover_lb_tf = self._taskflow_load(lb_failover_flow,
+                                                 store=stored_params)
+
+            with tf_logging.DynamicLoggingListener(failover_lb_tf, log=LOG):
+                failover_lb_tf.run()
+            LOG.info('Failover of load balancer %s completed successfully.',
+                     lb.id)
 
         except Exception as e:
-            with excutils.save_and_reraise_exception():
-                LOG.error("LB %(lbid)s failover exception: %(exc)s",
-                          {'lbid': load_balancer_id, 'exc': e})
+            with excutils.save_and_reraise_exception(reraise=False):
+                LOG.exception("LB %(lbid)s failover exception: %(exc)s",
+                              {'lbid': load_balancer_id, 'exc': e})
                 self._lb_repo.update(
                     db_apis.get_session(), load_balancer_id,
                     provisioning_status=constants.ERROR)
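Both revert paths now use save_and_reraise_exception(reraise=False). A toy illustration of that oslo.utils behavior (demo code, not from this patch): the cleanup block runs, and with reraise=False the original exception is suppressed instead of being re-raised (the Octavia code does its own LOG.exception call inside the block).

    from oslo_utils import excutils


    def demo():
        try:
            raise RuntimeError('failover failed')
        except Exception:
            with excutils.save_and_reraise_exception(reraise=False):
                print('cleanup runs here; caller will not see the exception')


    demo()  # returns normally instead of raising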
@ -1,4 +1,5 @@
 # Copyright 2015 Hewlett-Packard Development Company, L.P.
+# Copyright 2020 Red Hat, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@ -14,26 +15,26 @@
 #
 
 from oslo_config import cfg
+from oslo_log import log as logging
 from taskflow.patterns import graph_flow
 from taskflow.patterns import linear_flow
 from taskflow.patterns import unordered_flow
 
 from octavia.common import constants
+from octavia.common import utils
 from octavia.controller.worker.v1.tasks import amphora_driver_tasks
 from octavia.controller.worker.v1.tasks import cert_task
 from octavia.controller.worker.v1.tasks import compute_tasks
 from octavia.controller.worker.v1.tasks import database_tasks
 from octavia.controller.worker.v1.tasks import lifecycle_tasks
 from octavia.controller.worker.v1.tasks import network_tasks
+from octavia.controller.worker.v1.tasks import retry_tasks
 
 CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
 
 
 class AmphoraFlows(object):
-    def __init__(self):
-        # for some reason only this has the values from the config file
-        self.REST_AMPHORA_DRIVER = (CONF.controller_worker.amphora_driver ==
-                                    'amphora_haproxy_rest_driver')
 
     def get_create_amphora_flow(self):
         """Creates a flow to create an amphora.
@ -45,24 +46,16 @@ class AmphoraFlows(object):
             provides=constants.AMPHORA_ID))
         create_amphora_flow.add(lifecycle_tasks.AmphoraIDToErrorOnRevertTask(
             requires=constants.AMPHORA_ID))
-        if self.REST_AMPHORA_DRIVER:
-            create_amphora_flow.add(cert_task.GenerateServerPEMTask(
-                provides=constants.SERVER_PEM))
-
-            create_amphora_flow.add(
-                database_tasks.UpdateAmphoraDBCertExpiration(
-                    requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
-
-            create_amphora_flow.add(compute_tasks.CertComputeCreate(
-                requires=(constants.AMPHORA_ID, constants.SERVER_PEM,
-                          constants.BUILD_TYPE_PRIORITY, constants.FLAVOR,
-                          constants.AVAILABILITY_ZONE),
-                provides=constants.COMPUTE_ID))
-        else:
-            create_amphora_flow.add(compute_tasks.ComputeCreate(
-                requires=(constants.AMPHORA_ID, constants.BUILD_TYPE_PRIORITY,
-                          constants.FLAVOR, constants.AVAILABILITY_ZONE),
-                provides=constants.COMPUTE_ID))
+        create_amphora_flow.add(cert_task.GenerateServerPEMTask(
+            provides=constants.SERVER_PEM))
+
+        create_amphora_flow.add(
+            database_tasks.UpdateAmphoraDBCertExpiration(
+                requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
+
+        create_amphora_flow.add(compute_tasks.CertComputeCreate(
+            requires=(constants.AMPHORA_ID, constants.SERVER_PEM,
+                      constants.SERVER_GROUP_ID, constants.BUILD_TYPE_PRIORITY,
+                      constants.FLAVOR, constants.AVAILABILITY_ZONE),
+            provides=constants.COMPUTE_ID))
         create_amphora_flow.add(database_tasks.MarkAmphoraBootingInDB(
             requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
         create_amphora_flow.add(compute_tasks.ComputeActiveWait(
@ -115,7 +108,7 @@ class AmphoraFlows(object):
 
         return post_map_amp_to_lb
 
-    def _get_create_amp_for_lb_subflow(self, prefix, role):
+    def _get_create_amp_for_lb_subflow(self, prefix, role, is_spare=False):
         """Create a new amphora for lb."""
 
         sf_name = prefix + '-' + constants.CREATE_AMP_FOR_LB_SUBFLOW
@ -125,66 +118,22 @@ class AmphoraFlows(object):
             requires=constants.LOADBALANCER_ID,
             provides=constants.AMPHORA_ID))
-
-        require_server_group_id_condition = (
-            role in (constants.ROLE_BACKUP, constants.ROLE_MASTER) and
-            CONF.nova.enable_anti_affinity)
-
-        if self.REST_AMPHORA_DRIVER:
-            create_amp_for_lb_subflow.add(cert_task.GenerateServerPEMTask(
-                name=sf_name + '-' + constants.GENERATE_SERVER_PEM,
-                provides=constants.SERVER_PEM))
-
-            create_amp_for_lb_subflow.add(
-                database_tasks.UpdateAmphoraDBCertExpiration(
-                    name=sf_name + '-' + constants.UPDATE_CERT_EXPIRATION,
-                    requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
-
-            if require_server_group_id_condition:
-                create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate(
-                    name=sf_name + '-' + constants.CERT_COMPUTE_CREATE,
-                    requires=(
-                        constants.AMPHORA_ID,
-                        constants.SERVER_PEM,
-                        constants.BUILD_TYPE_PRIORITY,
-                        constants.SERVER_GROUP_ID,
-                        constants.FLAVOR,
-                        constants.AVAILABILITY_ZONE,
-                    ),
-                    provides=constants.COMPUTE_ID))
-            else:
-                create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate(
-                    name=sf_name + '-' + constants.CERT_COMPUTE_CREATE,
-                    requires=(
-                        constants.AMPHORA_ID,
-                        constants.SERVER_PEM,
-                        constants.BUILD_TYPE_PRIORITY,
-                        constants.FLAVOR,
-                        constants.AVAILABILITY_ZONE,
-                    ),
-                    provides=constants.COMPUTE_ID))
-        else:
-            if require_server_group_id_condition:
-                create_amp_for_lb_subflow.add(compute_tasks.ComputeCreate(
-                    name=sf_name + '-' + constants.COMPUTE_CREATE,
-                    requires=(
-                        constants.AMPHORA_ID,
-                        constants.BUILD_TYPE_PRIORITY,
-                        constants.SERVER_GROUP_ID,
-                        constants.FLAVOR,
-                        constants.AVAILABILITY_ZONE,
-                    ),
-                    provides=constants.COMPUTE_ID))
-            else:
-                create_amp_for_lb_subflow.add(compute_tasks.ComputeCreate(
-                    name=sf_name + '-' + constants.COMPUTE_CREATE,
-                    requires=(
-                        constants.AMPHORA_ID,
-                        constants.BUILD_TYPE_PRIORITY,
-                        constants.FLAVOR,
-                        constants.AVAILABILITY_ZONE,
-                    ),
-                    provides=constants.COMPUTE_ID))
+        create_amp_for_lb_subflow.add(cert_task.GenerateServerPEMTask(
+            name=sf_name + '-' + constants.GENERATE_SERVER_PEM,
+            provides=constants.SERVER_PEM))
+
+        create_amp_for_lb_subflow.add(
+            database_tasks.UpdateAmphoraDBCertExpiration(
+                name=sf_name + '-' + constants.UPDATE_CERT_EXPIRATION,
+                requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
+
+        create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate(
+            name=sf_name + '-' + constants.CERT_COMPUTE_CREATE,
+            requires=(constants.AMPHORA_ID, constants.SERVER_PEM,
+                      constants.BUILD_TYPE_PRIORITY,
+                      constants.SERVER_GROUP_ID,
+                      constants.FLAVOR, constants.AVAILABILITY_ZONE),
+            provides=constants.COMPUTE_ID))
         create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraComputeId(
             name=sf_name + '-' + constants.UPDATE_AMPHORA_COMPUTEID,
             requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
@ -207,10 +156,16 @@ class AmphoraFlows(object):
         create_amp_for_lb_subflow.add(amphora_driver_tasks.AmphoraFinalize(
             name=sf_name + '-' + constants.AMPHORA_FINALIZE,
             requires=constants.AMPHORA))
-        create_amp_for_lb_subflow.add(
-            database_tasks.MarkAmphoraAllocatedInDB(
-                name=sf_name + '-' + constants.MARK_AMPHORA_ALLOCATED_INDB,
-                requires=(constants.AMPHORA, constants.LOADBALANCER_ID)))
+        if is_spare:
+            create_amp_for_lb_subflow.add(
+                database_tasks.MarkAmphoraReadyInDB(
+                    name=sf_name + '-' + constants.MARK_AMPHORA_READY_INDB,
+                    requires=constants.AMPHORA))
+        else:
+            create_amp_for_lb_subflow.add(
+                database_tasks.MarkAmphoraAllocatedInDB(
+                    name=sf_name + '-' + constants.MARK_AMPHORA_ALLOCATED_INDB,
+                    requires=(constants.AMPHORA, constants.LOADBALANCER_ID)))
         create_amp_for_lb_subflow.add(database_tasks.ReloadAmphora(
             name=sf_name + '-' + constants.RELOAD_AMPHORA,
             requires=constants.AMPHORA_ID,
@ -249,7 +204,7 @@ class AmphoraFlows(object):
         return list(history.values())[0] is None
 
     def get_amphora_for_lb_subflow(
-            self, prefix, role=constants.ROLE_STANDALONE):
+            self, prefix, role=constants.ROLE_STANDALONE, is_spare=False):
         """Tries to allocate a spare amphora to a loadbalancer if none
 
         exists, create a new amphora.
@ -257,6 +212,14 @@ class AmphoraFlows(object):
 
         sf_name = prefix + '-' + constants.GET_AMPHORA_FOR_LB_SUBFLOW
 
+        # Don't replace a spare with another spare, just build a fresh one.
+        if is_spare:
+            get_spare_amp_flow = linear_flow.Flow(sf_name)
+
+            get_spare_amp_flow.add(self._get_create_amp_for_lb_subflow(
+                prefix, role, is_spare=is_spare))
+            return get_spare_amp_flow
+
         # We need a graph flow here for a conditional flow
         amp_for_lb_flow = graph_flow.Flow(sf_name)
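The "conditional flow" mentioned above is built with a graph_flow link plus a decider. A small illustrative taskflow pattern (not the Octavia subflow itself, but the same mechanism as _create_new_amp_for_lb_decider): the downstream task only runs when the decider approves.

    from taskflow import engines, task
    from taskflow.patterns import graph_flow


    class MapSpare(task.Task):
        default_provides = 'amphora'

        def execute(self):
            return None  # no spare available


    class CreateAmp(task.Task):
        def execute(self):
            print('no spare mapped; booting a new amphora')


    flow = graph_flow.Flow('amp-for-lb')
    mapper, creator = MapSpare(), CreateAmp()
    flow.add(mapper, creator)
    # Run creator only when the mapper produced nothing, mirroring
    # "list(history.values())[0] is None" in the decider above.
    flow.link(mapper, creator,
              decider=lambda history: list(history.values())[0] is None)
    engines.run(flow)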
@ -285,287 +248,145 @@ class AmphoraFlows(object):
             decider=self._create_new_amp_for_lb_decider,
             decider_depth='flow')
 
-        # Plug the network
-        # todo(xgerman): Rework failover flow
-        if prefix != constants.FAILOVER_AMPHORA_FLOW:
-            sf_name = prefix + '-' + constants.AMP_PLUG_NET_SUBFLOW
-            amp_for_lb_net_flow = linear_flow.Flow(sf_name)
-            amp_for_lb_net_flow.add(amp_for_lb_flow)
-            amp_for_lb_net_flow.add(*self._get_amp_net_subflow(sf_name))
-            return amp_for_lb_net_flow
-
         return amp_for_lb_flow
 
-    def _get_amp_net_subflow(self, sf_name):
-        flows = []
-        flows.append(network_tasks.PlugVIPAmpphora(
-            name=sf_name + '-' + constants.PLUG_VIP_AMPHORA,
-            requires=(constants.LOADBALANCER, constants.AMPHORA,
-                      constants.SUBNET),
-            provides=constants.AMP_DATA))
-
-        flows.append(network_tasks.ApplyQosAmphora(
-            name=sf_name + '-' + constants.APPLY_QOS_AMP,
-            requires=(constants.LOADBALANCER, constants.AMP_DATA,
-                      constants.UPDATE_DICT)))
-        flows.append(database_tasks.UpdateAmphoraVIPData(
-            name=sf_name + '-' + constants.UPDATE_AMPHORA_VIP_DATA,
-            requires=constants.AMP_DATA))
-        flows.append(database_tasks.ReloadAmphora(
-            name=sf_name + '-' + constants.RELOAD_AMP_AFTER_PLUG_VIP,
-            requires=constants.AMPHORA_ID,
-            provides=constants.AMPHORA))
-        flows.append(database_tasks.ReloadLoadBalancer(
-            name=sf_name + '-' + constants.RELOAD_LB_AFTER_PLUG_VIP,
-            requires=constants.LOADBALANCER_ID,
-            provides=constants.LOADBALANCER))
-        flows.append(network_tasks.GetAmphoraNetworkConfigs(
-            name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
-            requires=(constants.LOADBALANCER, constants.AMPHORA),
-            provides=constants.AMPHORA_NETWORK_CONFIG))
-        flows.append(amphora_driver_tasks.AmphoraPostVIPPlug(
-            name=sf_name + '-' + constants.AMP_POST_VIP_PLUG,
-            rebind={constants.AMPHORAE_NETWORK_CONFIG:
-                    constants.AMPHORA_NETWORK_CONFIG},
-            requires=(constants.LOADBALANCER,
-                      constants.AMPHORAE_NETWORK_CONFIG)))
-        return flows
-
-    def get_delete_amphora_flow(self):
-        """Creates a flow to delete an amphora.
-
-        This should be configurable in the config file
-        :returns: The flow for deleting the amphora
-        :raises AmphoraNotFound: The referenced Amphora was not found
-        """
-        delete_amphora_flow = linear_flow.Flow(constants.DELETE_AMPHORA_FLOW)
-        delete_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
-            requires=constants.AMPHORA))
-        delete_amphora_flow.add(database_tasks.
-                                MarkAmphoraPendingDeleteInDB(
-                                    requires=constants.AMPHORA))
-        delete_amphora_flow.add(database_tasks.
-                                MarkAmphoraHealthBusy(
-                                    requires=constants.AMPHORA))
-        delete_amphora_flow.add(compute_tasks.ComputeDelete(
-            requires=constants.AMPHORA))
-        delete_amphora_flow.add(database_tasks.
-                                DisableAmphoraHealthMonitoring(
-                                    requires=constants.AMPHORA))
-        delete_amphora_flow.add(database_tasks.
-                                MarkAmphoraDeletedInDB(
-                                    requires=constants.AMPHORA))
-        return delete_amphora_flow
+    def get_delete_amphora_flow(
+            self, amphora,
+            retry_attempts=CONF.controller_worker.amphora_delete_retries,
+            retry_interval=(
+                CONF.controller_worker.amphora_delete_retry_interval)):
+        """Creates a subflow to delete an amphora and its port.
+
+        This flow is idempotent and safe to retry.
+
+        :param amphora: An amphora object.
+        :param retry_attempts: The number of times the flow is retried.
+        :param retry_interval: The time to wait, in seconds, between retries.
+        :returns: The subflow for deleting the amphora.
+        :raises AmphoraNotFound: The referenced Amphora was not found.
+        """
+        delete_amphora_flow = linear_flow.Flow(
+            name=constants.DELETE_AMPHORA_FLOW + '-' + amphora.id,
+            retry=retry_tasks.SleepingRetryTimesController(
+                name='retry-' + constants.DELETE_AMPHORA_FLOW + '-' +
+                     amphora.id,
+                attempts=retry_attempts, interval=retry_interval))
+        delete_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
+            name=constants.AMPHORA_TO_ERROR_ON_REVERT + '-' + amphora.id,
+            inject={constants.AMPHORA: amphora}))
+        delete_amphora_flow.add(
+            database_tasks.MarkAmphoraPendingDeleteInDB(
+                name=constants.MARK_AMPHORA_PENDING_DELETE + '-' + amphora.id,
+                inject={constants.AMPHORA: amphora}))
+        delete_amphora_flow.add(database_tasks.MarkAmphoraHealthBusy(
+            name=constants.MARK_AMPHORA_HEALTH_BUSY + '-' + amphora.id,
+            inject={constants.AMPHORA: amphora}))
+        delete_amphora_flow.add(compute_tasks.ComputeDelete(
+            name=constants.DELETE_AMPHORA + '-' + amphora.id,
+            inject={constants.AMPHORA: amphora,
+                    constants.PASSIVE_FAILURE: True}))
+        delete_amphora_flow.add(database_tasks.DisableAmphoraHealthMonitoring(
+            name=constants.DISABLE_AMP_HEALTH_MONITORING + '-' + amphora.id,
+            inject={constants.AMPHORA: amphora}))
+        delete_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB(
+            name=constants.MARK_AMPHORA_DELETED + '-' + amphora.id,
+            inject={constants.AMPHORA: amphora}))
+        if amphora.vrrp_port_id:
+            delete_amphora_flow.add(network_tasks.DeletePort(
+                name=(constants.DELETE_PORT + '-' + str(amphora.id) + '-' +
+                      str(amphora.vrrp_port_id)),
+                inject={constants.PORT_ID: amphora.vrrp_port_id,
+                        constants.PASSIVE_FAILURE: True}))
+            # TODO(johnsom) What about cleaning up any member ports?
+            #               maybe we should get the list of attached ports
+            #               prior to delete and call delete on them here.
+            #               Fix this as part of
+            #               https://storyboard.openstack.org/#!/story/2007077
+
+        return delete_amphora_flow
 
-    def get_failover_flow(self, role=constants.ROLE_STANDALONE,
-                          load_balancer=None):
-        """Creates a flow to failover a stale amphora
-
-        :returns: The flow for amphora failover
-        """
-        failover_amphora_flow = linear_flow.Flow(
-            constants.FAILOVER_AMPHORA_FLOW)
-        failover_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
-            rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
-            requires=constants.AMPHORA))
-        failover_amphora_flow.add(network_tasks.FailoverPreparationForAmphora(
-            rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
-            requires=constants.AMPHORA))
-
-        # Note: It seems intuitive to boot an amphora prior to deleting
-        #       the old amphora, however this is a complicated issue.
-        #       If the target host (due to anit-affinity) is resource
-        #       constrained, this will fail where a post-delete will
-        #       succeed. Since this is async with the API it would result
-        #       in the LB ending in ERROR though the amps are still alive.
-        #       Consider in the future making this a complicated
-        #       try-on-failure-retry flow, or move upgrade failovers to be
-        #       synchronous with the API. For now spares pool and act/stdby
-        #       will mitigate most of this delay.
-
-        # Delete the old amphora
-        failover_amphora_flow.add(
-            database_tasks.MarkAmphoraPendingDeleteInDB(
-                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
-                requires=constants.AMPHORA))
-        failover_amphora_flow.add(
-            database_tasks.MarkAmphoraHealthBusy(
-                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
-                requires=constants.AMPHORA))
-        failover_amphora_flow.add(compute_tasks.ComputeDelete(
-            rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
-            requires=constants.AMPHORA))
-        failover_amphora_flow.add(network_tasks.WaitForPortDetach(
-            rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
-            requires=constants.AMPHORA))
-        failover_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB(
-            rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
-            requires=constants.AMPHORA))
-
-        # If this is an unallocated amp (spares pool), we're done
-        if not load_balancer:
-            failover_amphora_flow.add(
-                database_tasks.DisableAmphoraHealthMonitoring(
-                    rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
-                    requires=constants.AMPHORA))
-            return failover_amphora_flow
-
-        # Save failed amphora details for later
-        failover_amphora_flow.add(
-            database_tasks.GetAmphoraDetails(
-                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
-                requires=constants.AMPHORA,
-                provides=constants.AMP_DATA))
-
-        # Get a new amphora
-        # Note: Role doesn't matter here. We will update it later.
-        get_amp_subflow = self.get_amphora_for_lb_subflow(
-            prefix=constants.FAILOVER_AMPHORA_FLOW)
-        failover_amphora_flow.add(get_amp_subflow)
-
-        # Update the new amphora with the failed amphora details
-        failover_amphora_flow.add(database_tasks.UpdateAmpFailoverDetails(
-            requires=(constants.AMPHORA, constants.AMP_DATA)))
-
-        # Update the data stored in the flow from the database
-        failover_amphora_flow.add(database_tasks.ReloadLoadBalancer(
-            requires=constants.LOADBALANCER_ID,
-            provides=constants.LOADBALANCER))
-
-        failover_amphora_flow.add(database_tasks.ReloadAmphora(
-            requires=constants.AMPHORA_ID,
-            provides=constants.AMPHORA))
-
-        # Prepare to reconnect the network interface(s)
-        failover_amphora_flow.add(network_tasks.GetAmphoraeNetworkConfigs(
-            requires=constants.LOADBALANCER,
-            provides=constants.AMPHORAE_NETWORK_CONFIG))
-        failover_amphora_flow.add(database_tasks.GetListenersFromLoadbalancer(
-            requires=constants.LOADBALANCER, provides=constants.LISTENERS))
-        failover_amphora_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
-            requires=constants.LOADBALANCER, provides=constants.AMPHORAE))
-
-        # Plug the VIP ports into the new amphora
-        # The reason for moving these steps here is the udp listeners want to
-        # do some kernel configuration before Listener update for forbidding
-        # failure during rebuild amphora.
-        failover_amphora_flow.add(network_tasks.PlugVIPPort(
-            requires=(constants.AMPHORA, constants.AMPHORAE_NETWORK_CONFIG)))
-        failover_amphora_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug(
-            requires=(constants.AMPHORA, constants.LOADBALANCER,
-                      constants.AMPHORAE_NETWORK_CONFIG)))
-
-        # Listeners update needs to be run on all amphora to update
-        # their peer configurations. So parallelize this with an
-        # unordered subflow.
-        update_amps_subflow = unordered_flow.Flow(
-            constants.UPDATE_AMPS_SUBFLOW)
-
-        timeout_dict = {
-            constants.CONN_MAX_RETRIES:
-                CONF.haproxy_amphora.active_connection_max_retries,
-            constants.CONN_RETRY_INTERVAL:
-                CONF.haproxy_amphora.active_connection_rety_interval}
-
-        # Setup parallel flows for each amp. We don't know the new amp
-        # details at flow creation time, so setup a subflow for each
-        # amp on the LB, they let the task index into a list of amps
-        # to find the amphora it should work on.
-        amp_index = 0
-        for amp in load_balancer.amphorae:
-            if amp.status == constants.DELETED:
-                continue
-            update_amps_subflow.add(
-                amphora_driver_tasks.AmpListenersUpdate(
-                    name=constants.AMP_LISTENER_UPDATE + '-' + str(amp_index),
-                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
-                    inject={constants.AMPHORA_INDEX: amp_index,
-                            constants.TIMEOUT_DICT: timeout_dict}))
-            amp_index += 1
-
-        failover_amphora_flow.add(update_amps_subflow)
-
-        # Plug the member networks into the new amphora
-        failover_amphora_flow.add(network_tasks.CalculateAmphoraDelta(
-            requires=(constants.LOADBALANCER, constants.AMPHORA,
-                      constants.AVAILABILITY_ZONE),
-            provides=constants.DELTA))
-
-        failover_amphora_flow.add(network_tasks.HandleNetworkDelta(
-            requires=(constants.AMPHORA, constants.DELTA),
-            provides=constants.ADDED_PORTS))
-
-        failover_amphora_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug(
-            requires=(constants.LOADBALANCER, constants.ADDED_PORTS)))
-
-        failover_amphora_flow.add(database_tasks.ReloadLoadBalancer(
-            name='octavia-failover-LB-reload-2',
-            requires=constants.LOADBALANCER_ID,
-            provides=constants.LOADBALANCER))
-
-        # Handle the amphora role and VRRP if necessary
-        if role == constants.ROLE_MASTER:
-            failover_amphora_flow.add(database_tasks.MarkAmphoraMasterInDB(
-                name=constants.MARK_AMP_MASTER_INDB,
-                requires=constants.AMPHORA))
-            vrrp_subflow = self.get_vrrp_subflow(role)
-            failover_amphora_flow.add(vrrp_subflow)
-        elif role == constants.ROLE_BACKUP:
-            failover_amphora_flow.add(database_tasks.MarkAmphoraBackupInDB(
-                name=constants.MARK_AMP_BACKUP_INDB,
-                requires=constants.AMPHORA))
-            vrrp_subflow = self.get_vrrp_subflow(role)
-            failover_amphora_flow.add(vrrp_subflow)
-        elif role == constants.ROLE_STANDALONE:
-            failover_amphora_flow.add(
-                database_tasks.MarkAmphoraStandAloneInDB(
-                    name=constants.MARK_AMP_STANDALONE_INDB,
-                    requires=constants.AMPHORA))
-
-        failover_amphora_flow.add(amphora_driver_tasks.ListenersStart(
-            requires=(constants.LOADBALANCER, constants.AMPHORA)))
-        failover_amphora_flow.add(
-            database_tasks.DisableAmphoraHealthMonitoring(
-                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
-                requires=constants.AMPHORA))
-
-        return failover_amphora_flow
-
-    def get_vrrp_subflow(self, prefix):
+    def get_vrrp_subflow(self, prefix, timeout_dict=None,
+                         create_vrrp_group=True):
         sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW
         vrrp_subflow = linear_flow.Flow(sf_name)
+
+        # Optimization for failover flow. No reason to call this
+        # when configuring the secondary amphora.
+        if create_vrrp_group:
+            vrrp_subflow.add(database_tasks.CreateVRRPGroupForLB(
+                name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB,
+                requires=constants.LOADBALANCER_ID))
+
         vrrp_subflow.add(network_tasks.GetAmphoraeNetworkConfigs(
             name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
-            requires=constants.LOADBALANCER,
+            requires=constants.LOADBALANCER_ID,
             provides=constants.AMPHORAE_NETWORK_CONFIG))
-        vrrp_subflow.add(amphora_driver_tasks.AmphoraUpdateVRRPInterface(
-            name=sf_name + '-' + constants.AMP_UPDATE_VRRP_INTF,
-            requires=constants.LOADBALANCER,
-            provides=constants.LOADBALANCER))
-        vrrp_subflow.add(database_tasks.CreateVRRPGroupForLB(
-            name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB,
-            requires=constants.LOADBALANCER,
-            provides=constants.LOADBALANCER))
-        vrrp_subflow.add(amphora_driver_tasks.AmphoraVRRPUpdate(
-            name=sf_name + '-' + constants.AMP_VRRP_UPDATE,
-            requires=(constants.LOADBALANCER,
-                      constants.AMPHORAE_NETWORK_CONFIG)))
-        vrrp_subflow.add(amphora_driver_tasks.AmphoraVRRPStart(
-            name=sf_name + '-' + constants.AMP_VRRP_START,
-            requires=constants.LOADBALANCER))
+
+        # VRRP update needs to be run on all amphora to update
+        # their peer configurations. So parallelize this with an
+        # unordered subflow.
+        update_amps_subflow = unordered_flow.Flow('VRRP-update-subflow')
+
+        # We have three tasks to run in order, per amphora
+        amp_0_subflow = linear_flow.Flow('VRRP-amp-0-update-subflow')
+
+        amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface(
+            name=sf_name + '-0-' + constants.AMP_UPDATE_VRRP_INTF,
+            requires=constants.AMPHORAE,
+            inject={constants.AMPHORA_INDEX: 0,
+                    constants.TIMEOUT_DICT: timeout_dict},
+            provides=constants.AMP_VRRP_INT))
+
+        amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate(
+            name=sf_name + '-0-' + constants.AMP_VRRP_UPDATE,
+            requires=(constants.LOADBALANCER_ID,
+                      constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE,
+                      constants.AMP_VRRP_INT),
+            inject={constants.AMPHORA_INDEX: 0,
+                    constants.TIMEOUT_DICT: timeout_dict}))
+
+        amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart(
+            name=sf_name + '-0-' + constants.AMP_VRRP_START,
+            requires=constants.AMPHORAE,
+            inject={constants.AMPHORA_INDEX: 0,
+                    constants.TIMEOUT_DICT: timeout_dict}))
+
+        amp_1_subflow = linear_flow.Flow('VRRP-amp-1-update-subflow')
+
+        amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface(
+            name=sf_name + '-1-' + constants.AMP_UPDATE_VRRP_INTF,
+            requires=constants.AMPHORAE,
+            inject={constants.AMPHORA_INDEX: 1,
+                    constants.TIMEOUT_DICT: timeout_dict},
+            provides=constants.AMP_VRRP_INT))
+
+        amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate(
+            name=sf_name + '-1-' + constants.AMP_VRRP_UPDATE,
+            requires=(constants.LOADBALANCER_ID,
+                      constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE,
+                      constants.AMP_VRRP_INT),
+            inject={constants.AMPHORA_INDEX: 1,
+                    constants.TIMEOUT_DICT: timeout_dict}))
+        amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart(
+            name=sf_name + '-1-' + constants.AMP_VRRP_START,
+            requires=constants.AMPHORAE,
+            inject={constants.AMPHORA_INDEX: 1,
+                    constants.TIMEOUT_DICT: timeout_dict}))
+
+        update_amps_subflow.add(amp_0_subflow)
+        update_amps_subflow.add(amp_1_subflow)
+
+        vrrp_subflow.add(update_amps_subflow)
+
         return vrrp_subflow
 
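SleepingRetryTimesController is the new retry controller this patch adds in retry_tasks; it waits between attempts, unlike taskflow's stock Times controller, which retries immediately. The underlying taskflow mechanism can be sketched with the built-in controller (demo code, not the Octavia class):

    from taskflow import engines, retry, task
    from taskflow.patterns import linear_flow


    class Flaky(task.Task):
        attempts = 0

        def execute(self):
            Flaky.attempts += 1
            if Flaky.attempts < 3:
                raise IOError('transient cloud service failure')
            print('succeeded on attempt %d' % Flaky.attempts)


    # The retry controller re-runs the flow's tasks after a failure,
    # the same pattern get_delete_amphora_flow relies on above.
    flow = linear_flow.Flow('delete-amphora-demo',
                            retry=retry.Times(attempts=5))
    flow.add(Flaky())
    engines.run(flow)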
     def cert_rotate_amphora_flow(self):
         """Implement rotation for amphora's cert.
 
         1. Create a new certificate
         2. Upload the cert to amphora
         3. update the newly created certificate info to amphora
         4. update the cert_busy flag to be false after rotation
 
         :returns: The flow for updating an amphora
         """
@ -609,3 +430,258 @@ class AmphoraFlows(object):
             requires=(constants.AMPHORA, constants.FLAVOR)))
 
         return update_amphora_flow
+
+    def get_amphora_for_lb_failover_subflow(
+            self, prefix, role=constants.ROLE_STANDALONE,
+            failed_amp_vrrp_port_id=None, is_vrrp_ipv6=False, is_spare=False):
+        """Creates a new amphora that will be used in a failover flow.
+
+        :requires: loadbalancer_id, flavor, vip, vip_sg_id, loadbalancer
+        :provides: amphora_id, amphora
+        :param prefix: The flow name prefix to use on the flow and tasks.
+        :param role: The role this amphora will have in the topology.
+        :param failed_amp_vrrp_port_id: The base port ID of the failed amp.
+        :param is_vrrp_ipv6: True if the base port IP is IPv6.
+        :param is_spare: True if we are getting a spare amphora.
+        :return: A Taskflow sub-flow that will create the amphora.
+        """
+
+        sf_name = prefix + '-' + constants.CREATE_AMP_FOR_FAILOVER_SUBFLOW
+
+        amp_for_failover_flow = linear_flow.Flow(sf_name)
+
+        # Try to allocate or boot an amphora instance (unconfigured)
+        amp_for_failover_flow.add(self.get_amphora_for_lb_subflow(
+            prefix=prefix + '-' + constants.FAILOVER_LOADBALANCER_FLOW,
+            role=role, is_spare=is_spare))
+
+        # If we are getting a spare amphora, this is all we need to do.
+        if is_spare:
+            return amp_for_failover_flow
+
+        # Create the VIP base (aka VRRP) port for the amphora.
+        amp_for_failover_flow.add(network_tasks.CreateVIPBasePort(
+            name=prefix + '-' + constants.CREATE_VIP_BASE_PORT,
+            requires=(constants.VIP, constants.VIP_SG_ID,
+                      constants.AMPHORA_ID),
+            provides=constants.BASE_PORT))
+
+        # Attach the VIP base (aka VRRP) port to the amphora.
+        amp_for_failover_flow.add(compute_tasks.AttachPort(
+            name=prefix + '-' + constants.ATTACH_PORT,
+            requires=(constants.AMPHORA, constants.PORT),
+            rebind={constants.PORT: constants.BASE_PORT}))
+
+        # Update the amphora database record with the VIP base port info.
+        amp_for_failover_flow.add(database_tasks.UpdateAmpFailoverDetails(
+            name=prefix + '-' + constants.UPDATE_AMP_FAILOVER_DETAILS,
+            requires=(constants.AMPHORA, constants.VIP, constants.BASE_PORT)))
+
+        # Make sure the amphora in the flow storage is up to date
+        # or the vrrp_ip will be empty
+        amp_for_failover_flow.add(database_tasks.ReloadAmphora(
+            name=prefix + '-' + constants.RELOAD_AMPHORA,
+            requires=constants.AMPHORA_ID, provides=constants.AMPHORA))
+
+        # Update the amphora networking for the plugged VIP port
+        amp_for_failover_flow.add(network_tasks.GetAmphoraNetworkConfigsByID(
+            name=prefix + '-' + constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID,
+            requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID),
+            provides=constants.AMPHORAE_NETWORK_CONFIG))
+
+        # Disable the base (vrrp) port on the failed amphora
+        # This prevents a DAD failure when bringing up the new amphora.
+        # Keepalived will handle this for act/stdby.
+        if (role == constants.ROLE_STANDALONE and failed_amp_vrrp_port_id and
+                is_vrrp_ipv6):
+            amp_for_failover_flow.add(network_tasks.AdminDownPort(
+                name=prefix + '-' + constants.ADMIN_DOWN_PORT,
+                inject={constants.PORT_ID: failed_amp_vrrp_port_id}))
+
+        amp_for_failover_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug(
+            name=prefix + '-' + constants.AMPHORA_POST_VIP_PLUG,
+            requires=(constants.AMPHORA, constants.LOADBALANCER,
+                      constants.AMPHORAE_NETWORK_CONFIG)))
+
+        # Plug member ports
+        amp_for_failover_flow.add(network_tasks.CalculateAmphoraDelta(
+            name=prefix + '-' + constants.CALCULATE_AMPHORA_DELTA,
+            requires=(constants.LOADBALANCER, constants.AMPHORA,
+                      constants.AVAILABILITY_ZONE, constants.VRRP_PORT),
+            rebind={constants.VRRP_PORT: constants.BASE_PORT},
+            provides=constants.DELTA))
+
+        amp_for_failover_flow.add(network_tasks.HandleNetworkDelta(
+            name=prefix + '-' + constants.HANDLE_NETWORK_DELTA,
+            requires=(constants.AMPHORA, constants.DELTA),
+            provides=constants.ADDED_PORTS))
+
+        amp_for_failover_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug(
+            name=prefix + '-' + constants.AMPHORAE_POST_NETWORK_PLUG,
+            requires=(constants.LOADBALANCER, constants.ADDED_PORTS)))
+
+        return amp_for_failover_flow
+
+    def get_failover_amphora_flow(self, failed_amphora, lb_amp_count):
+        """Get a Taskflow flow to failover an amphora.
+
+        1. Build a replacement amphora.
+        2. Delete the old amphora.
+        3. Update the amphorae listener configurations.
+        4. Update the VRRP configurations if needed.
+
+        :param failed_amphora: The amphora object to failover.
+        :param lb_amp_count: The number of amphora on this load balancer.
+        :returns: The flow that will provide the failover.
+        """
+        failover_amp_flow = linear_flow.Flow(
+            constants.FAILOVER_AMPHORA_FLOW)
+
+        # Revert amphora to status ERROR if this flow goes wrong
+        failover_amp_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
+            requires=constants.AMPHORA,
+            inject={constants.AMPHORA: failed_amphora}))
+
+        if failed_amphora.role in (constants.ROLE_MASTER,
+                                   constants.ROLE_BACKUP):
+            amp_role = 'master_or_backup'
+        elif failed_amphora.role == constants.ROLE_STANDALONE:
+            amp_role = 'standalone'
+        elif failed_amphora.role is None:
+            amp_role = 'spare'
+        else:
+            amp_role = 'undefined'
+        LOG.info("Performing failover for amphora: %s",
+                 {"id": failed_amphora.id,
+                  "load_balancer_id": failed_amphora.load_balancer_id,
+                  "lb_network_ip": failed_amphora.lb_network_ip,
+                  "compute_id": failed_amphora.compute_id,
+                  "role": amp_role})
+
+        failover_amp_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB(
+            requires=constants.AMPHORA,
+            inject={constants.AMPHORA: failed_amphora}))
+
+        failover_amp_flow.add(database_tasks.MarkAmphoraHealthBusy(
+            requires=constants.AMPHORA,
+            inject={constants.AMPHORA: failed_amphora}))
+
+        failover_amp_flow.add(network_tasks.GetVIPSecurityGroupID(
+            requires=constants.LOADBALANCER_ID,
+            provides=constants.VIP_SG_ID))
+
+        is_spare = True
+        is_vrrp_ipv6 = False
+        if failed_amphora.load_balancer_id:
+            is_spare = False
+            if failed_amphora.vrrp_ip:
+                is_vrrp_ipv6 = utils.is_ipv6(failed_amphora.vrrp_ip)
+
+        # Get a replacement amphora and plug all of the networking.
+        #
+        # Do this early as the compute services have been observed to be
+        # unreliable. The community decided the chance that deleting first
+        # would open resources for an instance is less likely than the
+        # compute service failing to boot an instance for other reasons.
+
+        # TODO(johnsom) Move this back out to run for spares after
+        #               delete amphora API is available.
+        failover_amp_flow.add(self.get_amphora_for_lb_failover_subflow(
+            prefix=constants.FAILOVER_LOADBALANCER_FLOW,
+            role=failed_amphora.role,
+            failed_amp_vrrp_port_id=failed_amphora.vrrp_port_id,
+            is_vrrp_ipv6=is_vrrp_ipv6,
+            is_spare=is_spare))
+
+        failover_amp_flow.add(
+            self.get_delete_amphora_flow(
+                failed_amphora,
+                retry_attempts=CONF.controller_worker.amphora_delete_retries,
+                retry_interval=(
+                    CONF.controller_worker.amphora_delete_retry_interval)))
+        failover_amp_flow.add(
+            database_tasks.DisableAmphoraHealthMonitoring(
+                requires=constants.AMPHORA,
+                inject={constants.AMPHORA: failed_amphora}))
+
+        if not failed_amphora.load_balancer_id:
+            # This is an unallocated amphora (spares pool), we are done.
+            return failover_amp_flow
+
+        failover_amp_flow.add(database_tasks.GetLoadBalancer(
+            requires=constants.LOADBALANCER_ID,
+            inject={constants.LOADBALANCER_ID:
+                    failed_amphora.load_balancer_id},
+            provides=constants.LOADBALANCER))
+
+        failover_amp_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
+            name=constants.GET_AMPHORAE_FROM_LB,
+            requires=constants.LOADBALANCER_ID,
+            inject={constants.LOADBALANCER_ID:
+                    failed_amphora.load_balancer_id},
+            provides=constants.AMPHORAE))
+
+        # Setup timeouts for our requests to the amphorae
+        timeout_dict = {
+            constants.CONN_MAX_RETRIES:
+                CONF.haproxy_amphora.active_connection_max_retries,
+            constants.CONN_RETRY_INTERVAL:
+                CONF.haproxy_amphora.active_connection_rety_interval}
+
+        # Listeners update needs to be run on all amphora to update
+        # their peer configurations. So parallelize this with an
+        # unordered subflow.
+        update_amps_subflow = unordered_flow.Flow(
+            constants.UPDATE_AMPS_SUBFLOW)
+
+        for amp_index in range(0, lb_amp_count):
+            update_amps_subflow.add(
+                amphora_driver_tasks.AmphoraIndexListenerUpdate(
+                    name=str(amp_index) + '-' + constants.AMP_LISTENER_UPDATE,
+                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
+                    inject={constants.AMPHORA_INDEX: amp_index,
+                            constants.TIMEOUT_DICT: timeout_dict}))
+
+        failover_amp_flow.add(update_amps_subflow)
+
+        # Configure and enable keepalived in the amphora
+        if lb_amp_count == 2:
+            failover_amp_flow.add(
+                self.get_vrrp_subflow(constants.GET_VRRP_SUBFLOW,
+                                      timeout_dict, create_vrrp_group=False))
+
+        # Reload the listener. This needs to be done here because
+        # it will create the required haproxy check scripts for
+        # the VRRP deployed above.
+        # A "U" or newer amphora-agent will remove the need for this
+        # task here.
+        # TODO(johnsom) Remove this in the "W" cycle
+        reload_listener_subflow = unordered_flow.Flow(
+            constants.AMPHORA_LISTENER_RELOAD_SUBFLOW)
+
+        for amp_index in range(0, lb_amp_count):
+            reload_listener_subflow.add(
+                amphora_driver_tasks.AmphoraIndexListenersReload(
+                    name=(str(amp_index) + '-' +
+                          constants.AMPHORA_RELOAD_LISTENER),
+                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
+                    inject={constants.AMPHORA_INDEX: amp_index,
+                            constants.TIMEOUT_DICT: timeout_dict}))
+
+        failover_amp_flow.add(reload_listener_subflow)
+
+        # Remove any extraneous ports
+        # Note: Nova sometimes fails to delete ports attached to an instance.
+        #       For example, if you create an LB with a listener, then
+        #       'openstack server delete' the amphora, you will see the vrrp
+        #       port attached to that instance will remain after the instance
+        #       is deleted.
+        # TODO(johnsom) Fix this as part of
+        #               https://storyboard.openstack.org/#!/story/2007077
+
+        # Mark LB ACTIVE
+        failover_amp_flow.add(
+            database_tasks.MarkLBActiveInDB(mark_subobjects=True,
+                                            requires=constants.LOADBALANCER))
+
+        return failover_amp_flow
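The is_vrrp_ipv6 flag above depends only on the failed amphora's VRRP address family. octavia.common.utils.is_ipv6 behaves like this standard-library sketch (illustrative; the actual implementation may differ):

    import ipaddress


    def is_ipv6(ip_address):
        # True when the address parses as IPv6, e.g. the amphora VRRP IP.
        return ipaddress.ip_address(ip_address).version == 6


    print(is_ipv6('203.0.113.9'))   # False
    print(is_ipv6('2001:db8::9'))   # True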
@ -1,4 +1,5 @@
 # Copyright 2015 Hewlett-Packard Development Company, L.P.
+# Copyright 2020 Red Hat, Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@ -20,6 +21,7 @@ from taskflow.patterns import unordered_flow
 
 from octavia.common import constants
 from octavia.common import exceptions
+from octavia.common import utils
 from octavia.controller.worker.v1.flows import amphora_flows
 from octavia.controller.worker.v1.flows import listener_flows
 from octavia.controller.worker.v1.flows import member_flows
@ -68,7 +70,7 @@ class LoadBalancerFlows(object):
             requires=(constants.LOADBALANCER_ID, constants.VIP),
             provides=constants.LOADBALANCER))
         lb_create_flow.add(network_tasks.UpdateVIPSecurityGroup(
-            requires=constants.LOADBALANCER))
+            requires=constants.LOADBALANCER_ID))
         lb_create_flow.add(network_tasks.GetSubnetFromVIP(
             requires=constants.LOADBALANCER,
             provides=constants.SUBNET))
@ -93,9 +95,15 @@ class LoadBalancerFlows(object):
         return lb_create_flow
 
     def _create_single_topology(self):
-        return (self.amp_flows.get_amphora_for_lb_subflow(
-            prefix=constants.ROLE_STANDALONE,
-            role=constants.ROLE_STANDALONE), )
+        sf_name = (constants.ROLE_STANDALONE + '-' +
+                   constants.AMP_PLUG_NET_SUBFLOW)
+        amp_for_lb_net_flow = linear_flow.Flow(sf_name)
+        amp_for_lb_flow = self.amp_flows.get_amphora_for_lb_subflow(
+            prefix=constants.ROLE_STANDALONE,
+            role=constants.ROLE_STANDALONE)
+        amp_for_lb_net_flow.add(amp_for_lb_flow)
+        amp_for_lb_net_flow.add(*self._get_amp_net_subflow(sf_name))
+        return amp_for_lb_net_flow
 
     def _create_active_standby_topology(
             self, lf_name=constants.CREATE_LOADBALANCER_FLOW):
@ -124,16 +132,60 @@ class LoadBalancerFlows(object):
 
         f_name = constants.CREATE_LOADBALANCER_FLOW
         amps_flow = unordered_flow.Flow(f_name)
-        master_amp_sf = self.amp_flows.get_amphora_for_lb_subflow(
-            prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER
-        )
 
-        backup_amp_sf = self.amp_flows.get_amphora_for_lb_subflow(
-            prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP)
+        master_sf_name = (constants.ROLE_MASTER + '-' +
+                          constants.AMP_PLUG_NET_SUBFLOW)
+        master_amp_sf = linear_flow.Flow(master_sf_name)
+        master_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow(
+            prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER))
+        master_amp_sf.add(*self._get_amp_net_subflow(master_sf_name))
+
+        backup_sf_name = (constants.ROLE_BACKUP + '-' +
+                          constants.AMP_PLUG_NET_SUBFLOW)
+        backup_amp_sf = linear_flow.Flow(backup_sf_name)
+        backup_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow(
+            prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP))
+        backup_amp_sf.add(*self._get_amp_net_subflow(backup_sf_name))
 
         amps_flow.add(master_amp_sf, backup_amp_sf)
 
         return flows + [amps_flow]
 
+    def _get_amp_net_subflow(self, sf_name):
+        flows = []
+        flows.append(network_tasks.PlugVIPAmpphora(
+            name=sf_name + '-' + constants.PLUG_VIP_AMPHORA,
+            requires=(constants.LOADBALANCER, constants.AMPHORA,
+                      constants.SUBNET),
+            provides=constants.AMP_DATA))
+
+        flows.append(network_tasks.ApplyQosAmphora(
+            name=sf_name + '-' + constants.APPLY_QOS_AMP,
+            requires=(constants.LOADBALANCER, constants.AMP_DATA,
+                      constants.UPDATE_DICT)))
+        flows.append(database_tasks.UpdateAmphoraVIPData(
+            name=sf_name + '-' + constants.UPDATE_AMPHORA_VIP_DATA,
+            requires=constants.AMP_DATA))
+        flows.append(database_tasks.ReloadAmphora(
+            name=sf_name + '-' + constants.RELOAD_AMP_AFTER_PLUG_VIP,
+            requires=constants.AMPHORA_ID,
+            provides=constants.AMPHORA))
+        flows.append(database_tasks.ReloadLoadBalancer(
+            name=sf_name + '-' + constants.RELOAD_LB_AFTER_PLUG_VIP,
+            requires=constants.LOADBALANCER_ID,
+            provides=constants.LOADBALANCER))
+        flows.append(network_tasks.GetAmphoraNetworkConfigs(
+            name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
+            requires=(constants.LOADBALANCER, constants.AMPHORA),
+            provides=constants.AMPHORA_NETWORK_CONFIG))
+        flows.append(amphora_driver_tasks.AmphoraPostVIPPlug(
+            name=sf_name + '-' + constants.AMP_POST_VIP_PLUG,
+            rebind={constants.AMPHORAE_NETWORK_CONFIG:
+                    constants.AMPHORA_NETWORK_CONFIG},
+            requires=(constants.LOADBALANCER,
+                      constants.AMPHORAE_NETWORK_CONFIG)))
+        return flows
+
     def _create_listeners_flow(self):
         flows = []
         flows.append(
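The subflows above are built from the usual TaskFlow primitives: linear_flow runs tasks in order, unordered_flow runs them in parallel, and requires/provides/rebind wire each task's inputs and outputs through a shared store. A minimal, self-contained sketch of the pattern (the task and key names here are illustrative, not part of this patch):

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow


    class MakeGreeting(task.Task):
        # 'provides' publishes the return value into the shared store.
        def execute(self, name):
            return 'Hello, %s' % name


    class PrintGreeting(task.Task):
        # 'rebind' maps this task's 'text' argument onto the 'greeting' key.
        def execute(self, text):
            print(text)


    flow = linear_flow.Flow('demo-flow')
    flow.add(MakeGreeting(provides='greeting'))
    flow.add(PrintGreeting(rebind={'text': 'greeting'}))

    # 'store' seeds the initial inputs, like the store dicts used in Octavia.
    engines.run(flow, store={'name': 'amphora'})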
@ -177,13 +229,6 @@ class LoadBalancerFlows(object):
         created/allocated amphorae.
         :return: Post amphorae association subflow
         """
 
-        # Note: If any task in this flow failed, the created amphorae will be
-        # left ''incorrectly'' allocated to the loadbalancer. Likely,
-        # the get_new_LB_networking_subflow is the most prune to failure
-        # shall deallocate the amphora from its loadbalancer and put it in a
-        # READY state.
-
         sf_name = prefix + '-' + constants.POST_LB_AMP_ASSOCIATION_SUBFLOW
         post_create_LB_flow = linear_flow.Flow(sf_name)
         post_create_LB_flow.add(
@ -193,6 +238,10 @@ class LoadBalancerFlows(object):
             provides=constants.LOADBALANCER))
 
         if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
+            post_create_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
+                requires=constants.LOADBALANCER_ID,
+                provides=constants.AMPHORAE))
+
             vrrp_subflow = self.amp_flows.get_vrrp_subflow(prefix)
             post_create_LB_flow.add(vrrp_subflow)
 
@ -209,9 +258,10 @@ class LoadBalancerFlows(object):
 
         Because task flow doesn't support loops we store each listener
         we want to delete in the store part and then rebind
 
         :param lb: load balancer
         :return: (flow, store) -- flow for the deletion and store with all
             the listeners stored properly
         """
         listeners_delete_flow = unordered_flow.Flow('listener_delete_flow')
         store = {}
@ -235,6 +285,7 @@ class LoadBalancerFlows(object):
 
         Because task flow doesn't support loops we store each pool
         we want to delete in the store part and then rebind
 
         :param lb: load balancer
         :return: (flow, store) -- flow for the deletion and store with all
             the listeners stored properly
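The "store and rebind" idiom these docstrings mention is how the flows work around TaskFlow's lack of loop constructs: each object to act on is placed in the store under a unique key, and a per-object task instance is rebound to that key. A rough sketch of the idea (names are illustrative, not from this patch):

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import unordered_flow


    class DeleteThing(task.Task):
        def execute(self, thing):
            print('deleting %s' % thing)


    flow = unordered_flow.Flow('delete-things')
    store = {}
    for i, name in enumerate(['pool-a', 'pool-b']):
        key = 'thing_%d' % i
        store[key] = name
        # Each task instance reads its own store key via rebind.
        flow.add(DeleteThing(name='delete-' + name, rebind={'thing': key}))

    engines.run(flow, store=store)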
@ -287,41 +338,6 @@ class LoadBalancerFlows(object):
         """
         return self._get_delete_load_balancer_flow(lb, True)
 
-    def get_new_LB_networking_subflow(self):
-        """Create a sub-flow to setup networking.
-
-        :returns: The flow to setup networking for a new amphora
-        """
-
-        new_LB_net_subflow = linear_flow.Flow(constants.
-                                              LOADBALANCER_NETWORKING_SUBFLOW)
-        new_LB_net_subflow.add(network_tasks.AllocateVIP(
-            requires=constants.LOADBALANCER,
-            provides=constants.VIP))
-        new_LB_net_subflow.add(database_tasks.UpdateVIPAfterAllocation(
-            requires=(constants.LOADBALANCER_ID, constants.VIP),
-            provides=constants.LOADBALANCER))
-        new_LB_net_subflow.add(network_tasks.PlugVIP(
-            requires=constants.LOADBALANCER,
-            provides=constants.AMPS_DATA))
-        new_LB_net_subflow.add(network_tasks.ApplyQos(
-            requires=(constants.LOADBALANCER, constants.AMPS_DATA,
-                      constants.UPDATE_DICT)))
-        new_LB_net_subflow.add(database_tasks.UpdateAmphoraeVIPData(
-            requires=constants.AMPS_DATA))
-        new_LB_net_subflow.add(database_tasks.ReloadLoadBalancer(
-            name=constants.RELOAD_LB_AFTER_PLUG_VIP,
-            requires=constants.LOADBALANCER_ID,
-            provides=constants.LOADBALANCER))
-        new_LB_net_subflow.add(network_tasks.GetAmphoraeNetworkConfigs(
-            requires=constants.LOADBALANCER,
-            provides=constants.AMPHORAE_NETWORK_CONFIG))
-        new_LB_net_subflow.add(amphora_driver_tasks.AmphoraePostVIPPlug(
-            requires=(constants.LOADBALANCER,
-                      constants.AMPHORAE_NETWORK_CONFIG)))
-
-        return new_LB_net_subflow
-
     def get_update_load_balancer_flow(self):
         """Creates a flow to update a load balancer.
 
@ -340,3 +356,335 @@ class LoadBalancerFlows(object):
             requires=constants.LOADBALANCER))
 
         return update_LB_flow
+
+    def get_failover_LB_flow(self, amps, lb):
+        """Failover a load balancer.
+
+        1. Validate the VIP port is correct and present.
+        2. Build a replacement amphora.
+        3. Delete the failed amphora.
+        4. Configure the replacement amphora listeners.
+        5. Configure VRRP for the listeners.
+        6. Build the second replacement amphora.
+        7. Delete the second failed amphora.
+        8. Delete any extraneous amphora.
+        9. Configure the listeners on the new amphorae.
+        10. Configure the VRRP on the new amphorae.
+        11. Reload the listener configurations to pick up VRRP changes.
+        12. Mark the load balancer back to ACTIVE.
+
+        :returns: The flow that will provide the failover.
+        """
+        # Pick one amphora to be failed over if any exist.
+        failed_amp = None
+        if amps:
+            failed_amp = amps.pop()
+
+        failover_LB_flow = linear_flow.Flow(
+            constants.FAILOVER_LOADBALANCER_FLOW)
+
+        # Revert LB to provisioning_status ERROR if this flow goes wrong
+        failover_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
+            requires=constants.LOADBALANCER))
+
+        # Setup timeouts for our requests to the amphorae
+        timeout_dict = {
+            constants.CONN_MAX_RETRIES:
+                CONF.haproxy_amphora.active_connection_max_retries,
+            constants.CONN_RETRY_INTERVAL:
+                CONF.haproxy_amphora.active_connection_rety_interval}
+
+        if failed_amp:
+            if failed_amp.role in (constants.ROLE_MASTER,
+                                   constants.ROLE_BACKUP):
+                amp_role = 'master_or_backup'
+            elif failed_amp.role == constants.ROLE_STANDALONE:
+                amp_role = 'standalone'
+            elif failed_amp.role is None:
+                amp_role = 'spare'
+            else:
+                amp_role = 'undefined'
+            LOG.info("Performing failover for amphora: %s",
+                     {"id": failed_amp.id,
+                      "load_balancer_id": lb.id,
+                      "lb_network_ip": failed_amp.lb_network_ip,
+                      "compute_id": failed_amp.compute_id,
+                      "role": amp_role})
+
+            failover_LB_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB(
+                requires=constants.AMPHORA,
+                inject={constants.AMPHORA: failed_amp}))
+
+            failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy(
+                requires=constants.AMPHORA,
+                inject={constants.AMPHORA: failed_amp}))
+
+        # Check that the VIP port exists and is ok
+        failover_LB_flow.add(
+            network_tasks.AllocateVIP(requires=constants.LOADBALANCER,
+                                      provides=constants.VIP))
+
+        # Update the database with the VIP information
+        failover_LB_flow.add(database_tasks.UpdateVIPAfterAllocation(
+            requires=(constants.LOADBALANCER_ID, constants.VIP),
+            provides=constants.LOADBALANCER))
+
+        # Make sure the SG has the correct rules and re-apply to the
+        # VIP port. It is not used on the VIP port, but will help lock
+        # the SG as in use.
+        failover_LB_flow.add(network_tasks.UpdateVIPSecurityGroup(
+            requires=constants.LOADBALANCER_ID, provides=constants.VIP_SG_ID))
+
+        new_amp_role = constants.ROLE_STANDALONE
+        if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
+            new_amp_role = constants.ROLE_BACKUP
+
+        # Get a replacement amphora and plug all of the networking.
+        #
+        # Do this early as the compute services have been observed to be
+        # unreliable. The community decided the chance that deleting first
+        # would open resources for an instance is less likely than the compute
+        # service failing to boot an instance for other reasons.
+        if failed_amp:
+            failed_vrrp_is_ipv6 = False
+            if failed_amp.vrrp_ip:
+                failed_vrrp_is_ipv6 = utils.is_ipv6(failed_amp.vrrp_ip)
+            failover_LB_flow.add(
+                self.amp_flows.get_amphora_for_lb_failover_subflow(
+                    prefix=constants.FAILOVER_LOADBALANCER_FLOW,
+                    role=new_amp_role,
+                    failed_amp_vrrp_port_id=failed_amp.vrrp_port_id,
+                    is_vrrp_ipv6=failed_vrrp_is_ipv6))
+        else:
+            failover_LB_flow.add(
+                self.amp_flows.get_amphora_for_lb_failover_subflow(
+                    prefix=constants.FAILOVER_LOADBALANCER_FLOW,
+                    role=new_amp_role))
+
+        if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
+            failover_LB_flow.add(database_tasks.MarkAmphoraBackupInDB(
+                name=constants.MARK_AMP_BACKUP_INDB,
+                requires=constants.AMPHORA))
+
+        # Delete the failed amp
+        if failed_amp:
+            failover_LB_flow.add(
+                self.amp_flows.get_delete_amphora_flow(failed_amp))
+
+        # Update the data stored in the flow from the database
+        failover_LB_flow.add(database_tasks.ReloadLoadBalancer(
+            requires=constants.LOADBALANCER_ID,
+            provides=constants.LOADBALANCER))
+
+        # Configure the listener(s)
+        # We will run update on this amphora again later if this is
+        # an active/standby load balancer because we want this amp
+        # functional as soon as possible. It must run again to update
+        # the configurations for the new peers.
+        failover_LB_flow.add(amphora_driver_tasks.AmpListenersUpdate(
+            name=constants.AMP_LISTENER_UPDATE,
+            requires=(constants.LOADBALANCER, constants.AMPHORA),
+            inject={constants.TIMEOUT_DICT: timeout_dict}))
+
+        # Bring up the new "backup" amphora VIP now to reduce the outage
+        # on the final failover. This dropped the outage from 8-9 seconds
+        # to less than one in my lab.
+        # This does mean some steps have to be repeated later to reconfigure
+        # for the second amphora as a peer.
+        if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
+
+            failover_LB_flow.add(database_tasks.CreateVRRPGroupForLB(
+                name=new_amp_role + '-' + constants.CREATE_VRRP_GROUP_FOR_LB,
+                requires=constants.LOADBALANCER_ID))
+
+            failover_LB_flow.add(network_tasks.GetAmphoraNetworkConfigsByID(
+                name=(new_amp_role + '-' +
+                      constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID),
+                requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID),
+                provides=constants.FIRST_AMP_NETWORK_CONFIGS))
+
+            failover_LB_flow.add(
+                amphora_driver_tasks.AmphoraUpdateVRRPInterface(
+                    name=new_amp_role + '-' + constants.AMP_UPDATE_VRRP_INTF,
+                    requires=constants.AMPHORA,
+                    inject={constants.TIMEOUT_DICT: timeout_dict},
+                    provides=constants.FIRST_AMP_VRRP_INTERFACE))
+
+            failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPUpdate(
+                name=new_amp_role + '-' + constants.AMP_VRRP_UPDATE,
+                requires=(constants.LOADBALANCER_ID, constants.AMPHORA),
+                rebind={constants.AMPHORAE_NETWORK_CONFIG:
+                        constants.FIRST_AMP_NETWORK_CONFIGS,
+                        constants.AMP_VRRP_INT:
+                        constants.FIRST_AMP_VRRP_INTERFACE},
+                inject={constants.TIMEOUT_DICT: timeout_dict}))
+
+            failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPStart(
+                name=new_amp_role + '-' + constants.AMP_VRRP_START,
+                requires=constants.AMPHORA,
+                inject={constants.TIMEOUT_DICT: timeout_dict}))
+
+            # Start the listener. This needs to be done here because
+            # it will create the required haproxy check scripts for
+            # the VRRP deployed above.
+            # A "V" or newer amphora-agent will remove the need for this
+            # task here.
+            # TODO(johnsom) Remove this in the "X" cycle
+            failover_LB_flow.add(amphora_driver_tasks.ListenersStart(
+                name=new_amp_role + '-' + constants.AMP_LISTENER_START,
+                requires=(constants.LOADBALANCER, constants.AMPHORA)))
+
+            # #### Work on standby amphora if needed #####
+
+            new_amp_role = constants.ROLE_MASTER
+            failed_amp = None
+            if amps:
+                failed_amp = amps.pop()
+
+            if failed_amp:
+                if failed_amp.role in (constants.ROLE_MASTER,
+                                       constants.ROLE_BACKUP):
+                    amp_role = 'master_or_backup'
+                elif failed_amp.role == constants.ROLE_STANDALONE:
+                    amp_role = 'standalone'
+                elif failed_amp.role is None:
+                    amp_role = 'spare'
+                else:
+                    amp_role = 'undefined'
+                LOG.info("Performing failover for amphora: %s",
+                         {"id": failed_amp.id,
+                          "load_balancer_id": lb.id,
+                          "lb_network_ip": failed_amp.lb_network_ip,
+                          "compute_id": failed_amp.compute_id,
+                          "role": amp_role})
+
+                failover_LB_flow.add(
+                    database_tasks.MarkAmphoraPendingDeleteInDB(
+                        name=(new_amp_role + '-' +
+                              constants.MARK_AMPHORA_PENDING_DELETE),
+                        requires=constants.AMPHORA,
+                        inject={constants.AMPHORA: failed_amp}))
+
+                failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy(
+                    name=(new_amp_role + '-' +
+                          constants.MARK_AMPHORA_HEALTH_BUSY),
+                    requires=constants.AMPHORA,
+                    inject={constants.AMPHORA: failed_amp}))
+
+            # Get a replacement amphora and plug all of the networking.
+            #
+            # Do this early as the compute services have been observed to be
+            # unreliable. The community decided the chance that deleting first
+            # would open resources for an instance is less likely than the
+            # compute service failing to boot an instance for other reasons.
+            failover_LB_flow.add(
+                self.amp_flows.get_amphora_for_lb_failover_subflow(
+                    prefix=(new_amp_role + '-' +
+                            constants.FAILOVER_LOADBALANCER_FLOW),
+                    role=new_amp_role))
+
+            failover_LB_flow.add(database_tasks.MarkAmphoraMasterInDB(
+                name=constants.MARK_AMP_MASTER_INDB,
+                requires=constants.AMPHORA))
+
+            # Delete the failed amp
+            if failed_amp:
+                failover_LB_flow.add(
+                    self.amp_flows.get_delete_amphora_flow(
+                        failed_amp))
+                failover_LB_flow.add(
+                    database_tasks.DisableAmphoraHealthMonitoring(
+                        name=(new_amp_role + '-' +
+                              constants.DISABLE_AMP_HEALTH_MONITORING),
+                        requires=constants.AMPHORA,
+                        inject={constants.AMPHORA: failed_amp}))
+
+        # Remove any extraneous amphora
+        # Note: This runs in all topology situations.
+        # It should run before the act/stdby final listener update so
+        # that we don't bother attempting to update dead amphorae.
+        delete_extra_amps_flow = unordered_flow.Flow(
+            constants.DELETE_EXTRA_AMPHORAE_FLOW)
+        for amp in amps:
+            LOG.debug('Found extraneous amphora %s on load balancer %s. '
+                      'Deleting.', amp.id, lb.id)
+            delete_extra_amps_flow.add(
+                self.amp_flows.get_delete_amphora_flow(amp))
+
+        failover_LB_flow.add(delete_extra_amps_flow)
+
+        if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
+            # Update the data stored in the flow from the database
+            failover_LB_flow.add(database_tasks.ReloadLoadBalancer(
+                name=new_amp_role + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC,
+                requires=constants.LOADBALANCER_ID,
+                provides=constants.LOADBALANCER))
+
+            failover_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
+                name=new_amp_role + '-' + constants.GET_AMPHORAE_FROM_LB,
+                requires=constants.LOADBALANCER_ID,
+                provides=constants.AMPHORAE))
+
+            # Listeners update needs to be run on all amphora to update
+            # their peer configurations. So parallelize this with an
+            # unordered subflow.
+            update_amps_subflow = unordered_flow.Flow(
+                constants.UPDATE_AMPS_SUBFLOW)
+
+            # Setup parallel flows for each amp. We don't know the new amp
+            # details at flow creation time, so setup a subflow for each
+            # amp on the LB, they let the task index into a list of amps
+            # to find the amphora it should work on.
+            update_amps_subflow.add(
+                amphora_driver_tasks.AmphoraIndexListenerUpdate(
+                    name=(constants.AMPHORA + '-0-' +
+                          constants.AMP_LISTENER_UPDATE),
+                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
+                    inject={constants.AMPHORA_INDEX: 0,
+                            constants.TIMEOUT_DICT: timeout_dict}))
+            update_amps_subflow.add(
+                amphora_driver_tasks.AmphoraIndexListenerUpdate(
+                    name=(constants.AMPHORA + '-1-' +
+                          constants.AMP_LISTENER_UPDATE),
+                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
+                    inject={constants.AMPHORA_INDEX: 1,
+                            constants.TIMEOUT_DICT: timeout_dict}))
+
+            failover_LB_flow.add(update_amps_subflow)
+
+            # Configure and enable keepalived in the amphora
+            failover_LB_flow.add(self.amp_flows.get_vrrp_subflow(
+                new_amp_role + '-' + constants.GET_VRRP_SUBFLOW,
+                timeout_dict, create_vrrp_group=False))
+
+            # #### End of standby ####
+
+            # Reload the listener. This needs to be done here because
+            # it will create the required haproxy check scripts for
+            # the VRRP deployed above.
+            # A "V" or newer amphora-agent will remove the need for this
+            # task here.
+            # TODO(johnsom) Remove this in the "X" cycle
+            failover_LB_flow.add(
+                amphora_driver_tasks.AmphoraIndexListenersReload(
+                    name=(new_amp_role + '-' +
+                          constants.AMPHORA_RELOAD_LISTENER),
+                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
+                    inject={constants.AMPHORA_INDEX: 1,
+                            constants.TIMEOUT_DICT: timeout_dict}))
+
+        # Remove any extraneous ports
+        # Note: Nova sometimes fails to delete ports attached to an instance.
+        #       For example, if you create an LB with a listener, then
+        #       'openstack server delete' the amphora, you will see the vrrp
+        #       port attached to that instance will remain after the instance
+        #       is deleted.
+        # TODO(johnsom) Fix this as part of
+        #               https://storyboard.openstack.org/#!/story/2007077
+
+        # Mark LB ACTIVE
+        failover_LB_flow.add(
+            database_tasks.MarkLBActiveInDB(mark_subobjects=True,
+                                            requires=constants.LOADBALANCER))
+
+        return failover_LB_flow
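A flow like the one above is only a description; the controller worker still has to hand it to a TaskFlow engine with the initial inputs seeded in the store. A simplified, hypothetical sketch of that hand-off (the `lb_flows` object and the store keys shown are illustrative, not the worker's exact code):

    from taskflow import engines

    # Hypothetical driver code: build the failover flow and run it.
    flow = lb_flows.get_failover_LB_flow(amps, lb)
    engines.run(flow, store={'loadbalancer': lb,
                             'loadbalancer_id': lb.id})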
@ -51,7 +51,26 @@ class BaseAmphoraTask(task.Task):
 class AmpListenersUpdate(BaseAmphoraTask):
     """Task to update the listeners on one amphora."""
 
-    def execute(self, loadbalancer, amphora_index, amphorae, timeout_dict=()):
+    def execute(self, loadbalancer, amphora, timeout_dict=None):
+        # Note, we don't want this to cause a revert as it may be used
+        # in a failover flow with both amps failing. Skip it and let
+        # health manager fix it.
+        try:
+            self.amphora_driver.update_amphora_listeners(
+                loadbalancer, amphora, timeout_dict)
+        except Exception as e:
+            LOG.error('Failed to update listeners on amphora %s. Skipping '
+                      'this amphora as it is failing to update due to: %s',
+                      amphora.id, str(e))
+            self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                     status=constants.ERROR)
+
+
+class AmphoraIndexListenerUpdate(BaseAmphoraTask):
+    """Task to update the listeners on one amphora."""
+
+    def execute(self, loadbalancer, amphora_index, amphorae,
+                timeout_dict=None):
         # Note, we don't want this to cause a revert as it may be used
         # in a failover flow with both amps failing. Skip it and let
         # health manager fix it.
@ -100,6 +119,24 @@ class ListenersStart(BaseAmphoraTask):
             self.task_utils.mark_listener_prov_status_error(listener.id)
 
 
+class AmphoraIndexListenersReload(BaseAmphoraTask):
+    """Task to reload all listeners on an amphora."""
+
+    def execute(self, loadbalancer, amphorae, amphora_index,
+                timeout_dict=None):
+        """Execute listener reload routines for listeners on an amphora."""
+        if loadbalancer.listeners:
+            self.amphora_driver.reload(
+                loadbalancer, amphorae[amphora_index], timeout_dict)
+
+    def revert(self, loadbalancer, *args, **kwargs):
+        """Handle failed listeners reloads."""
+
+        LOG.warning("Reverting listener reload.")
+        for listener in loadbalancer.listeners:
+            self.task_utils.mark_listener_prov_status_error(listener.id)
+
+
 class ListenerDelete(BaseAmphoraTask):
     """Task to delete the listener on the vip."""
 
@ -174,7 +211,11 @@ class AmphoraePostNetworkPlug(BaseAmphoraTask):
     def execute(self, loadbalancer, added_ports):
         """Execute post_network_plug routine."""
         amp_post_plug = AmphoraPostNetworkPlug()
-        for amphora in loadbalancer.amphorae:
+        # We need to make sure we have the fresh list of amphora
+        amphorae = self.amphora_repo.get_all(
+            db_apis.get_session(), load_balancer_id=loadbalancer.id,
+            status=constants.AMPHORA_ALLOCATED)[0]
+        for amphora in amphorae:
             if amphora.id in added_ports:
                 amp_post_plug.execute(amphora, added_ports[amphora.id])
 
@ -183,10 +224,11 @@ class AmphoraePostNetworkPlug(BaseAmphoraTask):
         if isinstance(result, failure.Failure):
             return
         LOG.warning("Reverting post network plug.")
-        for amphora in filter(
-                lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
-                loadbalancer.amphorae):
 
+        amphorae = self.amphora_repo.get_all(
+            db_apis.get_session(), load_balancer_id=loadbalancer.id,
+            status=constants.AMPHORA_ALLOCATED)[0]
+        for amphora in amphorae:
             self.task_utils.mark_amphora_status_error(amphora.id)
 
 
@ -241,64 +283,97 @@ class AmphoraCertUpload(BaseAmphoraTask):
 class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
     """Task to get and update the VRRP interface device name from amphora."""
 
-    def execute(self, loadbalancer):
-        """Execute post_vip_routine."""
-        amps = []
-        timeout_dict = {
-            constants.CONN_MAX_RETRIES:
-                CONF.haproxy_amphora.active_connection_max_retries,
-            constants.CONN_RETRY_INTERVAL:
-                CONF.haproxy_amphora.active_connection_rety_interval}
-        for amp in filter(
-                lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
-                loadbalancer.amphorae):
-
-            try:
-                interface = self.amphora_driver.get_vrrp_interface(
-                    amp, timeout_dict=timeout_dict)
-            except Exception as e:
-                # This can occur when an active/standby LB has no listener
-                LOG.error('Failed to get amphora VRRP interface on amphora '
-                          '%s. Skipping this amphora as it is failing due to: '
-                          '%s', amp.id, str(e))
-                self.amphora_repo.update(db_apis.get_session(), amp.id,
-                                         status=constants.ERROR)
-                continue
-
-            self.amphora_repo.update(db_apis.get_session(), amp.id,
-                                     vrrp_interface=interface)
-            amps.append(self.amphora_repo.get(db_apis.get_session(),
-                                              id=amp.id))
-        loadbalancer.amphorae = amps
-        return loadbalancer
-
-    def revert(self, result, loadbalancer, *args, **kwargs):
-        """Handle a failed amphora vip plug notification."""
-        if isinstance(result, failure.Failure):
-            return
-        LOG.warning("Reverting Get Amphora VRRP Interface.")
-        for amp in filter(
-                lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
-                loadbalancer.amphorae):
-
-            try:
-                self.amphora_repo.update(db_apis.get_session(), amp.id,
-                                         vrrp_interface=None)
-            except Exception as e:
-                LOG.error("Failed to update amphora %(amp)s "
-                          "VRRP interface to None due to: %(except)s",
-                          {'amp': amp.id, 'except': e})
+    def execute(self, amphora, timeout_dict=None):
+        try:
+            interface = self.amphora_driver.get_interface_from_ip(
+                amphora, amphora.vrrp_ip, timeout_dict=timeout_dict)
+        except Exception as e:
+            # This can occur when an active/standby LB has no listener
+            LOG.error('Failed to get amphora VRRP interface on amphora '
+                      '%s. Skipping this amphora as it is failing due to: '
+                      '%s', amphora.id, str(e))
+            self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                     status=constants.ERROR)
+            return None
+
+        self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                 vrrp_interface=interface)
+        return interface
+
+
+class AmphoraIndexUpdateVRRPInterface(BaseAmphoraTask):
+    """Task to get and update the VRRP interface device name from amphora."""
+
+    def execute(self, amphorae, amphora_index, timeout_dict=None):
+        amphora_id = amphorae[amphora_index].id
+        try:
+            interface = self.amphora_driver.get_interface_from_ip(
+                amphorae[amphora_index], amphorae[amphora_index].vrrp_ip,
+                timeout_dict=timeout_dict)
+        except Exception as e:
+            # This can occur when an active/standby LB has no listener
+            LOG.error('Failed to get amphora VRRP interface on amphora '
+                      '%s. Skipping this amphora as it is failing due to: '
+                      '%s', amphora_id, str(e))
+            self.amphora_repo.update(db_apis.get_session(), amphora_id,
+                                     status=constants.ERROR)
+            return None
+
+        self.amphora_repo.update(db_apis.get_session(), amphora_id,
+                                 vrrp_interface=interface)
+        return interface
 
 
 class AmphoraVRRPUpdate(BaseAmphoraTask):
-    """Task to update the VRRP configuration of the loadbalancer amphorae."""
+    """Task to update the VRRP configuration of an amphora."""
 
-    def execute(self, loadbalancer, amphorae_network_config):
+    def execute(self, loadbalancer_id, amphorae_network_config, amphora,
+                amp_vrrp_int, timeout_dict=None):
         """Execute update_vrrp_conf."""
-        self.amphora_driver.update_vrrp_conf(loadbalancer,
-                                             amphorae_network_config)
-        LOG.debug("Uploaded VRRP configuration of loadbalancer %s amphorae",
-                  loadbalancer.id)
+        loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(),
+                                                  id=loadbalancer_id)
+        # Note, we don't want this to cause a revert as it may be used
+        # in a failover flow with both amps failing. Skip it and let
+        # health manager fix it.
+        amphora.vrrp_interface = amp_vrrp_int
+        try:
+            self.amphora_driver.update_vrrp_conf(
+                loadbalancer, amphorae_network_config, amphora, timeout_dict)
+        except Exception as e:
+            LOG.error('Failed to update VRRP configuration amphora %s. '
+                      'Skipping this amphora as it is failing to update due '
+                      'to: %s', amphora.id, str(e))
+            self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                     status=constants.ERROR)
+
+        LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora.id)
+
+
+class AmphoraIndexVRRPUpdate(BaseAmphoraTask):
+    """Task to update the VRRP configuration of an amphora."""
+
+    def execute(self, loadbalancer_id, amphorae_network_config, amphora_index,
+                amphorae, amp_vrrp_int, timeout_dict=None):
+        """Execute update_vrrp_conf."""
+        loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(),
+                                                  id=loadbalancer_id)
+        # Note, we don't want this to cause a revert as it may be used
+        # in a failover flow with both amps failing. Skip it and let
+        # health manager fix it.
+        amphora_id = amphorae[amphora_index].id
+        amphorae[amphora_index].vrrp_interface = amp_vrrp_int
+        try:
+            self.amphora_driver.update_vrrp_conf(
+                loadbalancer, amphorae_network_config, amphorae[amphora_index],
+                timeout_dict)
+        except Exception as e:
+            LOG.error('Failed to update VRRP configuration amphora %s. '
+                      'Skipping this amphora as it is failing to update due '
+                      'to: %s', amphora_id, str(e))
+            self.amphora_repo.update(db_apis.get_session(), amphora_id,
+                                     status=constants.ERROR)
+
+        LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id)
 
 
 class AmphoraVRRPStop(BaseAmphoraTask):
@ -311,12 +386,26 @@ class AmphoraVRRPStop(BaseAmphoraTask):
 
 
 class AmphoraVRRPStart(BaseAmphoraTask):
-    """Task to start keepalived of all amphorae of a LB."""
+    """Task to start keepalived on an amphora.
 
-    def execute(self, loadbalancer):
-        self.amphora_driver.start_vrrp_service(loadbalancer)
-        LOG.debug("Started VRRP of loadbalancer %s amphorae",
-                  loadbalancer.id)
+    This will reload keepalived if it is already running.
+    """
+
+    def execute(self, amphora, timeout_dict=None):
+        self.amphora_driver.start_vrrp_service(amphora, timeout_dict)
+        LOG.debug("Started VRRP on amphora %s.", amphora.id)
+
+
+class AmphoraIndexVRRPStart(BaseAmphoraTask):
+    """Task to start keepalived on an amphora.
+
+    This will reload keepalived if it is already running.
+    """
+
+    def execute(self, amphora_index, amphorae, timeout_dict=None):
+        self.amphora_driver.start_vrrp_service(amphorae[amphora_index],
+                                               timeout_dict)
+        LOG.debug("Started VRRP on amphora %s.", amphorae[amphora_index].id)
 
 
 class AmphoraComputeConnectivityWait(BaseAmphoraTask):
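The new AmphoraIndex* task variants exist because the failover flow cannot know a replacement amphora's identity when the flow is built; each task instead receives the full amphorae list plus an injected index. The pattern, reduced to its essentials (illustrative names, not from this patch):

    from taskflow import task


    class IndexedWork(task.Task):
        # 'amphora_index' is injected per task instance at flow-build time;
        # 'amphorae' is resolved from the store at run time, after earlier
        # tasks have refreshed it from the database.
        def execute(self, amphorae, amphora_index):
            amphora = amphorae[amphora_index]
            print('working on %s' % amphora)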
@ -21,6 +21,7 @@ from oslo_log import log as logging
 from stevedore import driver as stevedore_driver
 from taskflow import task
 from taskflow.types import failure
+import tenacity
 
 from octavia.amphorae.backends.agent import agent_jinja_cfg
 from octavia.common import constants
@ -50,10 +51,9 @@ class BaseComputeTask(task.Task):
 class ComputeCreate(BaseComputeTask):
     """Create the compute instance for a new amphora."""
 
-    def execute(self, amphora_id, config_drive_files=None,
+    def execute(self, amphora_id, server_group_id, config_drive_files=None,
                 build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY,
-                server_group_id=None, ports=None, flavor=None,
-                availability_zone=None):
+                ports=None, flavor=None, availability_zone=None):
         """Create an amphora
 
         :returns: an amphora
@ -147,10 +147,9 @@ class ComputeCreate(BaseComputeTask):
 
 
 class CertComputeCreate(ComputeCreate):
-    def execute(self, amphora_id, server_pem,
+    def execute(self, amphora_id, server_pem, server_group_id,
                 build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY,
-                server_group_id=None, ports=None, flavor=None,
-                availability_zone=None):
+                ports=None, flavor=None, availability_zone=None):
         """Create an amphora
 
         :returns: an amphora
@ -190,15 +189,50 @@ class DeleteAmphoraeOnLoadBalancer(BaseComputeTask):
 
 
 class ComputeDelete(BaseComputeTask):
-    def execute(self, amphora):
-        LOG.debug("Compute Delete execute for amphora with id %s", amphora.id)
+    @tenacity.retry(retry=tenacity.retry_if_exception_type(),
+                    stop=tenacity.stop_after_attempt(CONF.compute.max_retries),
+                    wait=tenacity.wait_exponential(
+                        multiplier=CONF.compute.retry_backoff,
+                        min=CONF.compute.retry_interval,
+                        max=CONF.compute.retry_max), reraise=True)
+    def execute(self, amphora, passive_failure=False):
+        if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1:
+            LOG.debug('Compute delete execute for amphora with ID %s and '
+                      'compute ID: %s', amphora.id, amphora.compute_id)
+        else:
+            LOG.warning('Retrying compute delete of %s attempt %s of %s.',
+                        amphora.compute_id,
+                        self.execute.retry.statistics[
+                            constants.ATTEMPT_NUMBER],
+                        self.execute.retry.stop.max_attempt_number)
+        # Let the Taskflow engine know we are working and alive
+        # Don't use get with a default for 'attempt_number', we need to fail
+        # if that number is missing.
+        self.update_progress(
+            self.execute.retry.statistics[constants.ATTEMPT_NUMBER] /
+            self.execute.retry.stop.max_attempt_number)
 
         try:
             self.compute.delete(amphora.compute_id)
         except Exception:
-            LOG.exception("Compute delete for amphora id: %s failed",
-                          amphora.id)
-            raise
+            if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] !=
+                    self.execute.retry.stop.max_attempt_number):
+                LOG.warning('Compute delete for amphora id: %s failed. '
+                            'Retrying.', amphora.id)
+                raise
+            if passive_failure:
+                LOG.exception('Compute delete for compute ID: %s on amphora '
+                              'ID: %s failed. This resource will be abandoned '
+                              'and should manually be cleaned up once the '
+                              'compute service is functional.',
+                              amphora.compute_id, amphora.id)
+            else:
+                LOG.exception('Compute delete for compute ID: %s on amphora '
+                              'ID: %s failed. The compute service has failed. '
+                              'Aborting and reverting.', amphora.compute_id,
+                              amphora.id)
+                raise
 
 
 class ComputeActiveWait(BaseComputeTask):
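The decorator shape above is the retry mechanism this patch standardizes on: tenacity re-runs the whole task body on any exception, with an exponential backoff seeded and capped from the new config options. A stand-alone sketch of the same shape, with hard-coded values in place of the Octavia config options:

    import tenacity


    @tenacity.retry(retry=tenacity.retry_if_exception_type(),
                    stop=tenacity.stop_after_attempt(15),  # compute.max_retries
                    wait=tenacity.wait_exponential(
                        multiplier=1,   # compute.retry_backoff
                        min=1,          # compute.retry_interval
                        max=10),        # compute.retry_max
                    reraise=True)
    def flaky_call():
        # Keeps raising until the external service recovers; the waits grow
        # exponentially from about retry_interval up to the retry_max cap,
        # and after 15 attempts the original exception is re-raised.
        raise RuntimeError('service unavailable')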
@ -267,3 +301,31 @@ class NovaServerGroupDelete(BaseComputeTask):
             self.compute.delete_server_group(server_group_id)
         else:
             return
+
+
+class AttachPort(BaseComputeTask):
+    def execute(self, amphora, port):
+        """Attach a port to an amphora instance.
+
+        :param amphora: The amphora to attach the port to.
+        :param port: The port to attach to the amphora.
+        :returns: None
+        """
+        LOG.debug('Attaching port: %s to compute: %s',
+                  port.id, amphora.compute_id)
+        self.compute.attach_network_or_port(amphora.compute_id,
+                                            port_id=port.id)
+
+    def revert(self, amphora, port, *args, **kwargs):
+        """Revert our port attach.
+
+        :param amphora: The amphora to detach the port from.
+        :param port: The port to attach to the amphora.
+        """
+        LOG.warning('Reverting port: %s attach to compute: %s',
+                    port.id, amphora.compute_id)
+        try:
+            self.compute.detach_port(amphora.compute_id, port.id)
+        except Exception as e:
+            LOG.error('Failed to detach port %s from compute %s for revert '
+                      'due to %s.', port.id, amphora.compute_id, str(e))
@ -449,20 +449,21 @@ class UpdateAmphoraVIPData(BaseDatabaseTask):
 class UpdateAmpFailoverDetails(BaseDatabaseTask):
     """Update amphora failover details in the database."""
 
-    def execute(self, amphora, amp_data):
+    def execute(self, amphora, vip, base_port):
         """Update amphora failover details in the database.
 
         :param amphora: The amphora to update
-        :param amp_data: data_models.Amphora object with update data
+        :param vip: The VIP object associated with this amphora.
+        :param base_port: The base port object associated with the amphora.
         :returns: None
         """
         # role and vrrp_priority will be updated later.
         self.repos.amphora.update(db_apis.get_session(), amphora.id,
-                                  vrrp_ip=amp_data.vrrp_ip,
-                                  ha_ip=amp_data.ha_ip,
-                                  vrrp_port_id=amp_data.vrrp_port_id,
-                                  ha_port_id=amp_data.ha_port_id,
-                                  vrrp_id=amp_data.vrrp_id)
+                                  vrrp_ip=base_port.fixed_ips[0].ip_address,
+                                  ha_ip=vip.ip_address,
+                                  vrrp_port_id=base_port.id,
+                                  ha_port_id=vip.port_id,
+                                  vrrp_id=1)
 
 
 class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask):
@ -1544,15 +1545,17 @@ class GetAmphoraDetails(BaseDatabaseTask):
 
 
 class GetAmphoraeFromLoadbalancer(BaseDatabaseTask):
-    """Task to pull the listeners from a loadbalancer."""
+    """Task to pull the amphorae from a loadbalancer."""
 
-    def execute(self, loadbalancer):
+    def execute(self, loadbalancer_id):
         """Pull the amphorae from a loadbalancer.
 
-        :param loadbalancer: Load balancer which listeners are required
+        :param loadbalancer_id: Load balancer ID to get amphorae from
         :returns: A list of Listener objects
         """
         amphorae = []
+        loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(),
+                                                  id=loadbalancer_id)
         for amp in loadbalancer.amphorae:
             a = self.amphora_repo.get(db_apis.get_session(), id=amp.id,
                                       show_deleted=False)
@ -1579,6 +1582,22 @@ class GetListenersFromLoadbalancer(BaseDatabaseTask):
         return listeners
 
 
+class GetLoadBalancer(BaseDatabaseTask):
+    """Get an load balancer object from the database."""
+
+    def execute(self, loadbalancer_id, *args, **kwargs):
+        """Get an load balancer object from the database.
+
+        :param loadbalancer_id: The load balancer ID to lookup
+        :returns: The load balancer object
+        """
+
+        LOG.debug("Get load balancer from DB for load balancer id: %s",
+                  loadbalancer_id)
+        return self.loadbalancer_repo.get(db_apis.get_session(),
+                                          id=loadbalancer_id)
+
+
 class GetVipFromLoadbalancer(BaseDatabaseTask):
     """Task to pull the vip from a loadbalancer."""
 
@ -1594,25 +1613,23 @@ class GetVipFromLoadbalancer(BaseDatabaseTask):
 class CreateVRRPGroupForLB(BaseDatabaseTask):
     """Create a VRRP group for a load balancer."""
 
-    def execute(self, loadbalancer):
+    def execute(self, loadbalancer_id):
         """Create a VRRP group for a load balancer.
 
-        :param loadbalancer: Load balancer for which a VRRP group
+        :param loadbalancer_id: Load balancer ID for which a VRRP group
             should be created
-        :returns: Updated load balancer
         """
         try:
-            loadbalancer.vrrp_group = self.repos.vrrpgroup.create(
+            self.repos.vrrpgroup.create(
                 db_apis.get_session(),
-                load_balancer_id=loadbalancer.id,
-                vrrp_group_name=str(loadbalancer.id).replace('-', ''),
+                load_balancer_id=loadbalancer_id,
+                vrrp_group_name=str(loadbalancer_id).replace('-', ''),
                 vrrp_auth_type=constants.VRRP_AUTH_DEFAULT,
                 vrrp_auth_pass=uuidutils.generate_uuid().replace('-', '')[0:7],
                 advert_int=CONF.keepalived_vrrp.vrrp_advert_int)
         except odb_exceptions.DBDuplicateEntry:
             LOG.debug('VRRP_GROUP entry already exists for load balancer, '
                       'skipping create.')
-        return loadbalancer
 
 
 class DisableAmphoraHealthMonitoring(BaseDatabaseTask):
@ -12,15 +12,20 @@
|
|||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
#
|
#
|
||||||
|
import time
|
||||||
|
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
|
from oslo_utils import excutils
|
||||||
from taskflow import task
|
from taskflow import task
|
||||||
from taskflow.types import failure
|
from taskflow.types import failure
|
||||||
|
import tenacity
|
||||||
|
|
||||||
from octavia.common import constants
|
from octavia.common import constants
|
||||||
from octavia.common import utils
|
from octavia.common import utils
|
||||||
from octavia.controller.worker import task_utils
|
from octavia.controller.worker import task_utils
|
||||||
|
from octavia.db import api as db_apis
|
||||||
|
from octavia.db import repositories
|
||||||
from octavia.network import base
|
from octavia.network import base
|
||||||
from octavia.network import data_models as n_data_models
|
from octavia.network import data_models as n_data_models
|
||||||
|
|
||||||
@ -35,6 +40,7 @@ class BaseNetworkTask(task.Task):
|
|||||||
super(BaseNetworkTask, self).__init__(**kwargs)
|
super(BaseNetworkTask, self).__init__(**kwargs)
|
||||||
self._network_driver = None
|
self._network_driver = None
|
||||||
self.task_utils = task_utils.TaskUtils()
|
self.task_utils = task_utils.TaskUtils()
|
||||||
|
self.lb_repo = repositories.LoadBalancerRepository()
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def network_driver(self):
|
def network_driver(self):
|
||||||
@ -47,12 +53,12 @@ class CalculateAmphoraDelta(BaseNetworkTask):
|
|||||||
|
|
||||||
default_provides = constants.DELTA
|
default_provides = constants.DELTA
|
||||||
|
|
||||||
def execute(self, loadbalancer, amphora, availability_zone):
|
def execute(self, loadbalancer, amphora, availability_zone,
|
||||||
|
vrrp_port=None):
|
||||||
LOG.debug("Calculating network delta for amphora id: %s", amphora.id)
|
LOG.debug("Calculating network delta for amphora id: %s", amphora.id)
|
||||||
|
|
||||||
# Figure out what networks we want
|
if vrrp_port is None:
|
||||||
# seed with lb network(s)
|
vrrp_port = self.network_driver.get_port(amphora.vrrp_port_id)
|
||||||
vrrp_port = self.network_driver.get_port(amphora.vrrp_port_id)
|
|
||||||
if availability_zone:
|
if availability_zone:
|
||||||
management_nets = (
|
management_nets = (
|
||||||
[availability_zone.get(constants.MANAGEMENT_NETWORK)] or
|
[availability_zone.get(constants.MANAGEMENT_NETWORK)] or
|
||||||
@ -361,12 +367,19 @@ class PlugVIP(BaseNetworkTask):
|
|||||||
class UpdateVIPSecurityGroup(BaseNetworkTask):
|
class UpdateVIPSecurityGroup(BaseNetworkTask):
|
||||||
"""Task to setup SG for LB."""
|
"""Task to setup SG for LB."""
|
||||||
|
|
||||||
def execute(self, loadbalancer):
|
def execute(self, loadbalancer_id):
|
||||||
"""Task to setup SG for LB."""
|
"""Task to setup SG for LB.
|
||||||
|
|
||||||
LOG.debug("Setup SG for loadbalancer id: %s", loadbalancer.id)
|
Task is idempotent and safe to retry.
|
||||||
|
"""
|
||||||
|
|
||||||
self.network_driver.update_vip_sg(loadbalancer, loadbalancer.vip)
|
LOG.debug("Setup SG for loadbalancer id: %s", loadbalancer_id)
|
||||||
|
|
||||||
|
loadbalancer = self.lb_repo.get(db_apis.get_session(),
|
||||||
|
id=loadbalancer_id)
|
||||||
|
|
||||||
|
return self.network_driver.update_vip_sg(loadbalancer,
|
||||||
|
loadbalancer.vip)
|
||||||
|
|
||||||
|
|
||||||
class GetSubnetFromVIP(BaseNetworkTask):
|
class GetSubnetFromVIP(BaseNetworkTask):
|
||||||
@ -500,11 +513,26 @@ class GetAmphoraNetworkConfigs(BaseNetworkTask):
|
|||||||
amphora=amphora)
|
amphora=amphora)
|
||||||
|
|
||||||
|
|
||||||
|
class GetAmphoraNetworkConfigsByID(BaseNetworkTask):
|
||||||
|
"""Task to retrieve amphora network details."""
|
||||||
|
|
||||||
|
def execute(self, loadbalancer_id, amphora_id=None):
|
||||||
|
LOG.debug("Retrieving vip network details.")
|
||||||
|
amp_repo = repositories.AmphoraRepository()
|
||||||
|
loadbalancer = self.lb_repo.get(db_apis.get_session(),
|
||||||
|
id=loadbalancer_id)
|
||||||
|
amphora = amp_repo.get(db_apis.get_session(), id=amphora_id)
|
||||||
|
return self.network_driver.get_network_configs(loadbalancer,
|
||||||
|
amphora=amphora)
|
||||||
|
|
||||||
|
|
||||||
class GetAmphoraeNetworkConfigs(BaseNetworkTask):
|
class GetAmphoraeNetworkConfigs(BaseNetworkTask):
|
||||||
"""Task to retrieve amphorae network details."""
|
"""Task to retrieve amphorae network details."""
|
||||||
|
|
||||||
def execute(self, loadbalancer):
|
def execute(self, loadbalancer_id):
|
||||||
LOG.debug("Retrieving vip network details.")
|
LOG.debug("Retrieving vip network details.")
|
||||||
|
loadbalancer = self.lb_repo.get(db_apis.get_session(),
|
||||||
|
id=loadbalancer_id)
|
||||||
return self.network_driver.get_network_configs(loadbalancer)
|
return self.network_driver.get_network_configs(loadbalancer)
|
||||||
|
|
||||||
|
|
||||||
@ -553,36 +581,6 @@ class PlugPorts(BaseNetworkTask):
|
|||||||
self.network_driver.plug_port(amphora, port)
|
self.network_driver.plug_port(amphora, port)
|
||||||
|
|
||||||
|
|
||||||
class PlugVIPPort(BaseNetworkTask):
|
|
||||||
"""Task to plug a VIP into a compute instance."""
|
|
||||||
|
|
||||||
def execute(self, amphora, amphorae_network_config):
|
|
||||||
vrrp_port = amphorae_network_config.get(amphora.id).vrrp_port
|
|
||||||
LOG.debug('Plugging VIP VRRP port ID: %(port_id)s into compute '
|
|
||||||
'instance: %(compute_id)s.',
|
|
||||||
{'port_id': vrrp_port.id, 'compute_id': amphora.compute_id})
|
|
||||||
self.network_driver.plug_port(amphora, vrrp_port)
|
|
||||||
|
|
||||||
def revert(self, result, amphora, amphorae_network_config,
|
|
||||||
*args, **kwargs):
|
|
||||||
vrrp_port = None
|
|
||||||
try:
|
|
||||||
vrrp_port = amphorae_network_config.get(amphora.id).vrrp_port
|
|
||||||
self.network_driver.unplug_port(amphora, vrrp_port)
|
|
||||||
except Exception:
|
|
||||||
LOG.warning('Failed to unplug vrrp port: %(port)s from amphora: '
|
|
||||||
'%(amp)s', {'port': vrrp_port.id, 'amp': amphora.id})
|
|
||||||
|
|
||||||
|
|
||||||
class WaitForPortDetach(BaseNetworkTask):
|
|
||||||
"""Task to wait for the neutron ports to detach from an amphora."""
|
|
||||||
|
|
||||||
def execute(self, amphora):
|
|
||||||
LOG.debug('Waiting for ports to detach from amphora: %(amp_id)s.',
|
|
||||||
{'amp_id': amphora.id})
|
|
||||||
self.network_driver.wait_for_port_detach(amphora)
|
|
||||||
|
|
||||||
|
|
||||||
class ApplyQos(BaseNetworkTask):
|
class ApplyQos(BaseNetworkTask):
|
||||||
"""Apply Quality of Services to the VIP"""
|
"""Apply Quality of Services to the VIP"""
|
||||||
|
|
||||||
@ -664,3 +662,146 @@ class ApplyQosAmphora(BaseNetworkTask):
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error('Failed to remove QoS policy: %s from port: %s due '
|
LOG.error('Failed to remove QoS policy: %s from port: %s due '
|
||||||
'to error: %s', orig_qos_id, amp_data.vrrp_port_id, e)
|
'to error: %s', orig_qos_id, amp_data.vrrp_port_id, e)
|
||||||
|
|
||||||
|
|
||||||
|
class DeletePort(BaseNetworkTask):
|
||||||
|
"""Task to delete a network port."""
|
||||||
|
|
||||||
|
@tenacity.retry(retry=tenacity.retry_if_exception_type(),
|
||||||
|
stop=tenacity.stop_after_attempt(
|
||||||
|
CONF.networking.max_retries),
|
||||||
|
wait=tenacity.wait_exponential(
|
||||||
|
multiplier=CONF.networking.retry_backoff,
|
||||||
|
min=CONF.networking.retry_interval,
|
||||||
|
max=CONF.networking.retry_max), reraise=True)
|
||||||
|
def execute(self, port_id, passive_failure=False):
|
||||||
|
"""Delete the network port."""
|
||||||
|
if port_id is None:
|
||||||
|
return
|
||||||
|
if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1:
|
||||||
|
LOG.debug("Deleting network port %s", port_id)
|
||||||
|
else:
|
||||||
|
LOG.warning('Retrying network port %s delete attempt %s of %s.',
|
||||||
|
port_id,
|
||||||
|
self.execute.retry.statistics[
|
||||||
|
constants.ATTEMPT_NUMBER],
|
||||||
|
self.execute.retry.stop.max_attempt_number)
|
||||||
|
# Let the Taskflow engine know we are working and alive
|
||||||
|
# Don't use get with a default for 'attempt_number', we need to fail
|
||||||
|
# if that number is missing.
|
||||||
|
self.update_progress(
|
||||||
|
self.execute.retry.statistics[constants.ATTEMPT_NUMBER] /
|
||||||
|
self.execute.retry.stop.max_attempt_number)
|
||||||
|
try:
|
||||||
|
self.network_driver.delete_port(port_id)
|
||||||
|
except Exception:
|
||||||
|
if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] !=
|
||||||
|
self.execute.retry.stop.max_attempt_number):
|
||||||
|
LOG.warning('Network port delete for port id: %s failed. '
|
||||||
|
'Retrying.', port_id)
|
||||||
|
raise
|
||||||
|
if passive_failure:
|
||||||
|
LOG.exception('Network port delete for port ID: %s failed. '
|
||||||
|
'This resource will be abandoned and should '
|
||||||
|
'manually be cleaned up once the '
|
||||||
|
'network service is functional.', port_id)
|
||||||
|
# Let's at least attempt to disable it so if the instance
|
||||||
|
# comes back from the dead it doesn't conflict with anything.
|
||||||
|
try:
|
||||||
|
self.network_driver.admin_down_port(port_id)
|
||||||
|
LOG.info('Successfully disabled (admin down) network port '
|
||||||
|
'%s that failed to delete.', port_id)
|
||||||
|
except Exception:
|
||||||
|
LOG.warning('Attempt to disable (admin down) network port '
|
||||||
|
'%s failed. The network service has failed. '
|
||||||
|
'Continuing.', port_id)
|
||||||
|
else:
|
||||||
|
LOG.exception('Network port delete for port ID: %s failed. '
|
||||||
|
'The network service has failed. '
|
||||||
|
'Aborting and reverting.', port_id)
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
class CreateVIPBasePort(BaseNetworkTask):
    """Task to create the VIP base port for an amphora."""

    @tenacity.retry(retry=tenacity.retry_if_exception_type(),
                    stop=tenacity.stop_after_attempt(
                        CONF.networking.max_retries),
                    wait=tenacity.wait_exponential(
                        multiplier=CONF.networking.retry_backoff,
                        min=CONF.networking.retry_interval,
                        max=CONF.networking.retry_max), reraise=True)
    def execute(self, vip, vip_sg_id, amphora_id):
        port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id
        fixed_ips = [{constants.SUBNET_ID: vip.subnet_id}]
        sg_id = []
        if vip_sg_id:
            sg_id = [vip_sg_id]
        port = self.network_driver.create_port(
            vip.network_id, name=port_name, fixed_ips=fixed_ips,
            secondary_ips=[vip.ip_address], security_group_ids=sg_id,
            qos_policy_id=vip.qos_policy_id)
        LOG.info('Created port %s with ID %s for amphora %s',
                 port_name, port.id, amphora_id)
        return port

    def revert(self, result, vip, vip_sg_id, amphora_id, *args, **kwargs):
        if isinstance(result, failure.Failure):
            return
        try:
            port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id
            for port in result:
                self.network_driver.delete_port(port.id)
                LOG.info('Deleted port %s with ID %s for amphora %s due to a '
                         'revert.', port_name, port.id, amphora_id)
        except Exception as e:
            LOG.error('Failed to delete port %s. Resources may still be in '
                      'use for a port intended for amphora %s due to error '
                      '%s. Search for a port named %s',
                      result, amphora_id, str(e), port_name)


class AdminDownPort(BaseNetworkTask):

    def execute(self, port_id):
        try:
            self.network_driver.set_port_admin_state_up(port_id, False)
        except base.PortNotFound:
            return
        for i in range(CONF.networking.max_retries):
            port = self.network_driver.get_port(port_id)
            if port.status == constants.DOWN:
                LOG.debug('Disabled port: %s', port_id)
                return
            LOG.debug('Port %s is %s instead of DOWN, waiting.',
                      port_id, port.status)
            time.sleep(CONF.networking.retry_interval)
        LOG.error('Port %s failed to go DOWN. Port status is still %s. '
                  'Ignoring and continuing.', port_id, port.status)

    def revert(self, result, port_id, *args, **kwargs):
        if isinstance(result, failure.Failure):
            return
        try:
            self.network_driver.set_port_admin_state_up(port_id, True)
        except Exception as e:
            LOG.error('Failed to bring port %s admin up on revert due to: %s.',
                      port_id, str(e))


class GetVIPSecurityGroupID(BaseNetworkTask):

    def execute(self, loadbalancer_id):
        sg_name = utils.get_vip_security_group_name(loadbalancer_id)
        try:
            security_group = self.network_driver.get_security_group(sg_name)
            if security_group:
                return security_group.id
        except base.SecurityGroupNotFound:
            with excutils.save_and_reraise_exception() as ctxt:
                if self.network_driver.sec_grp_enabled:
                    LOG.error('VIP security group %s was not found.', sg_name)
                else:
                    ctxt.reraise = False
        return None

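The decorator above wires the new `[networking]` retry options into tenacity. A rough sketch of the sleep schedule `wait_exponential` produces with the values shown in this patch (retry_backoff=1, retry_interval=1, retry_max=10); the exact series depends on tenacity's attempt indexing:

    # Approximation of tenacity.wait_exponential(multiplier=1, min=1, max=10)
    multiplier, min_wait, max_wait = 1, 1, 10
    for failed_attempt in range(1, 7):
        delay = min(max(multiplier * 2 ** failed_attempt, min_wait), max_wait)
        print('sleep %ds after failure %d' % (delay, failed_attempt))
    # -> 2, 4, 8, 10, 10, 10 seconds; retry_max caps the backoff
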
octavia/controller/worker/v1/tasks/retry_tasks.py (new file, 74 lines)
@ -0,0 +1,74 @@
# Copyright 2019 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

from oslo_log import log as logging
from taskflow import retry

LOG = logging.getLogger(__name__)


class SleepingRetryTimesController(retry.Times):
    """A retry controller to attempt subflow retries a number of times.

    This retry controller overrides the Times on_failure to inject a
    sleep interval between retries.
    It also adds a log message when all of the retries are exhausted.

    :param attempts: number of attempts to retry the associated subflow
                     before giving up
    :type attempts: int
    :param name: Meaningful name for this atom, should be something that is
                 distinguishable and understandable for notification,
                 debugging, storing and any other similar purposes.
    :param provides: A set, string or list of items that
                     this will be providing (or could provide) to others, used
                     to correlate and associate the thing/s this atom
                     produces, if it produces anything at all.
    :param requires: A set or list of required inputs for this atom's
                     ``execute`` method.
    :param rebind: A dict of key/value pairs used to define argument
                   name conversions for inputs to this atom's ``execute``
                   method.
    :param revert_all: when provided this will cause the full flow to revert
                       when the number of attempts that have been tried
                       has been reached (when false, it will only locally
                       revert the associated subflow)
    :type revert_all: bool
    :param interval: Interval, in seconds, between retry attempts.
    :type interval: int
    """

    def __init__(self, attempts=1, name=None, provides=None, requires=None,
                 auto_extract=True, rebind=None, revert_all=False, interval=1):
        super(SleepingRetryTimesController, self).__init__(
            attempts, name, provides, requires, auto_extract, rebind,
            revert_all)
        self._interval = interval

    def on_failure(self, history, *args, **kwargs):
        if len(history) < self._attempts:
            LOG.warning('%s attempt %s of %s failed. Sleeping %s seconds and '
                        'retrying.',
                        self.name[self.name.startswith('retry-') and
                                  len('retry-'):], len(history),
                        self._attempts, self._interval)
            time.sleep(self._interval)
            return retry.RETRY
        return self._revert_action

    def revert(self, history, *args, **kwargs):
        LOG.error('%s retries with interval %s seconds have failed for %s. '
                  'Giving up.', len(history), self._interval, self.name)
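A hedged usage sketch, with invented task and flow names, of how a flow could attach this controller (assumed importable from the module above) so an entire probing subflow re-runs with a pause between attempts:

    from taskflow.patterns import linear_flow
    from taskflow import task


    class ProbeAmphora(task.Task):
        def execute(self):
            # Probe work that may raise and trigger a subflow retry.
            pass


    def build_probe_subflow():
        # The retry controller governs every atom added to this subflow.
        probe_subflow = linear_flow.Flow(
            'probe-amphora-subflow',
            retry=SleepingRetryTimesController(
                attempts=3, name='retry-probe-amphora-subflow', interval=5))
        probe_subflow.add(ProbeAmphora())
        return probe_subflow
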
@ -350,8 +350,8 @@ class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
                 db_lb.amphorae):

             try:
-                interface = self.amphora_driver.get_vrrp_interface(
-                    amp, timeout_dict=timeout_dict)
+                interface = self.amphora_driver.get_interface_from_ip(
+                    amp, amp.vrrp_ip, timeout_dict=timeout_dict)
             except Exception as e:
                 # This can occur when an active/standby LB has no listener
                 LOG.error('Failed to get amphora VRRP interface on amphora '
@ -77,6 +77,14 @@ class QosPolicyNotFound(NetworkException):
     pass


+class SecurityGroupNotFound(NetworkException):
+    pass
+
+
+class CreatePortException(NetworkException):
+    pass
+
+
 class AbstractNetworkDriver(object, metaclass=abc.ABCMeta):
     """This class defines the methods for a fully functional network driver.
@ -96,6 +104,24 @@ class AbstractNetworkDriver(object, metaclass=abc.ABCMeta):
     :raises: AllocateVIPException, PortNotFound, SubnetNotFound
     """

+    @abc.abstractmethod
+    def create_port(self, network_id, name=None, fixed_ips=(),
+                    secondary_ips=(), security_group_ids=(),
+                    admin_state_up=True, qos_policy_id=None):
+        """Creates a network port.
+
+        fixed_ips = [{'subnet_id': <id>, ('ip_address': <IP>)},]
+        ip_address is optional in the fixed_ips dictionary.
+
+        :param network_id: The network the port should be created on.
+        :param name: The name to apply to the port.
+        :param fixed_ips: A list of fixed IP dicts.
+        :param secondary_ips: A list of secondary IPs to add to the port.
+        :param security_group_ids: A list of security group IDs for the port.
+        :param qos_policy_id: The QoS policy ID to apply to the port.
+        :returns port: A port data model object.
+        """
+
     @abc.abstractmethod
     def deallocate_vip(self, vip):
         """Removes any resources that reserved this virtual ip.
@ -106,6 +132,14 @@ class AbstractNetworkDriver(object, metaclass=abc.ABCMeta):
              VIPConfigurationNotFound
     """

+    @abc.abstractmethod
+    def delete_port(self, port_id):
+        """Delete a network port.
+
+        :param port_id: The port ID to delete.
+        :returns: None
+        """
+
     @abc.abstractmethod
     def plug_vip(self, load_balancer, vip):
         """Plugs a virtual ip as the frontend connection of a load balancer.
@ -249,6 +283,15 @@ class AbstractNetworkDriver(object, metaclass=abc.ABCMeta):
     :raises: NetworkException, PortNotFound
     """

+    @abc.abstractmethod
+    def get_security_group(self, sg_name):
+        """Retrieves the security group by its name.
+
+        :param sg_name: The security group name.
+        :return: octavia.network.data_models.SecurityGroup, None if not enabled
+        :raises: NetworkException, SecurityGroupNotFound
+        """
+
     @abc.abstractmethod
     def failover_preparation(self, amphora):
         """Prepare an amphora for failover.
@ -345,3 +388,12 @@ class AbstractNetworkDriver(object, metaclass=abc.ABCMeta):
     :return: octavia.network.data_models.Network_IP_Availability
     :raises: NetworkException, NetworkNotFound
     """

+    @abc.abstractmethod
+    def set_port_admin_state_up(self, port_id, state):
+        """Set the admin state of a port. True is up, False is down.
+
+        :param port_id: The port ID to update.
+        :param state: True for up, False for down.
+        :returns: None
+        """
@ -76,7 +76,7 @@ class Port(data_models.BaseDataModel):
     def __init__(self, id=None, name=None, device_id=None, device_owner=None,
                  mac_address=None, network_id=None, status=None,
                  project_id=None, admin_state_up=None, fixed_ips=None,
-                 network=None, qos_policy_id=None):
+                 network=None, qos_policy_id=None, security_group_ids=None):
         self.id = id
         self.name = name
         self.device_id = device_id
@ -89,6 +89,7 @@ class Port(data_models.BaseDataModel):
         self.fixed_ips = fixed_ips or []
         self.network = network
         self.qos_policy_id = qos_policy_id
+        self.security_group_ids = security_group_ids or []

     def get_subnet_id(self, fixed_ip_address):
         for fixed_ip in self.fixed_ips:
@ -163,3 +164,16 @@ class Network_IP_Availability(data_models.BaseDataModel):
         self.total_ips = total_ips
         self.used_ips = used_ips
         self.subnet_ip_availability = subnet_ip_availability


+class SecurityGroup(data_models.BaseDataModel):
+
+    def __init__(self, id=None, project_id=None, name=None, description=None,
+                 security_group_rule_ids=None, tags=None, stateful=None):
+        self.id = id
+        self.project_id = project_id
+        self.name = name
+        self.description = description
+        self.security_group_rule_ids = security_group_rule_ids or []
+        self.tags = tags or []
+        self.stateful = stateful
@ -11,7 +11,6 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-

 import ipaddress
 import time

@ -24,6 +23,7 @@ from stevedore import driver as stevedore_driver
 from octavia.common import constants
 from octavia.common import data_models
 from octavia.common import exceptions
+from octavia.common import utils as common_utils
 from octavia.i18n import _
 from octavia.network import base
 from octavia.network import data_models as n_data_models
@ -33,7 +33,6 @@ from octavia.network.drivers.neutron import utils
 LOG = logging.getLogger(__name__)
 AAP_EXT_ALIAS = 'allowed-address-pairs'
 PROJECT_ID_ALIAS = 'project-id'
-VIP_SECURITY_GRP_PREFIX = 'lb-'
 OCTAVIA_OWNER = 'Octavia'

 CONF = cfg.CONF
@ -84,11 +83,12 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
     def _plug_amphora_vip(self, amphora, subnet):
         # We need a vip port owned by Octavia for Act/Stby and failover
         try:
-            port = {'port': {'name': 'octavia-lb-vrrp-' + amphora.id,
-                             'network_id': subnet.network_id,
-                             'fixed_ips': [{'subnet_id': subnet.id}],
-                             'admin_state_up': True,
-                             'device_owner': OCTAVIA_OWNER}}
+            port = {constants.PORT: {
+                constants.NAME: 'octavia-lb-vrrp-' + amphora.id,
+                constants.NETWORK_ID: subnet.network_id,
+                constants.FIXED_IPS: [{'subnet_id': subnet.id}],
+                constants.ADMIN_STATE_UP: True,
+                constants.DEVICE_OWNER: OCTAVIA_OWNER}}
             new_port = self.neutron_client.create_port(port)
             new_port = utils.convert_port_dict_to_model(new_port)

@ -135,10 +135,11 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
             raise base.PlugVIPException(message)

     def _get_lb_security_group(self, load_balancer_id):
-        sec_grp_name = VIP_SECURITY_GRP_PREFIX + load_balancer_id
+        sec_grp_name = common_utils.get_vip_security_group_name(
+            load_balancer_id)
         sec_grps = self.neutron_client.list_security_groups(name=sec_grp_name)
-        if sec_grps and sec_grps.get('security_groups'):
-            return sec_grps.get('security_groups')[0]
+        if sec_grps and sec_grps.get(constants.SECURITY_GROUPS):
+            return sec_grps.get(constants.SECURITY_GROUPS)[0]
         return None

     def _get_ethertype_for_ip(self, ip):
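`common_utils.get_vip_security_group_name` replaces the removed module-level `VIP_SECURITY_GRP_PREFIX` constant. A sketch of the behavior it is assumed to provide, inferred from the 'lb-' prefix the old code built by hand (the real helper lives in octavia.common.utils):

    def get_vip_security_group_name(load_balancer_id):
        # Inferred behavior: keep the historical 'lb-' + <LB ID> naming.
        if load_balancer_id:
            return 'lb-' + load_balancer_id
        return None
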
@ -195,7 +196,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
                     rule.get('protocol', '').lower() in ['tcp', 'udp'] and
                     (rule.get('port_range_max'), rule.get('protocol'),
                      rule.get('remote_ip_prefix')) in del_ports):
-                rule_id = rule.get('id')
+                rule_id = rule.get(constants.ID)
                 try:
                     self.neutron_client.delete_security_group_rule(rule_id)
                 except neutron_client_exceptions.NotFound:
@ -235,19 +236,11 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
         except Exception as e:
             raise base.PlugVIPException(str(e))

-    def _update_vip_security_group(self, load_balancer, vip):
-        sec_grp = self._get_lb_security_group(load_balancer.id)
-        if not sec_grp:
-            sec_grp_name = VIP_SECURITY_GRP_PREFIX + load_balancer.id
-            sec_grp = self._create_security_group(sec_grp_name)
-        self._update_security_group_rules(load_balancer, sec_grp.get('id'))
-        self._add_vip_security_group_to_port(load_balancer.id, vip.port_id,
-                                             sec_grp.get('id'))
-
     def _add_vip_security_group_to_port(self, load_balancer_id, port_id,
                                         sec_grp_id=None):
         sec_grp_id = (sec_grp_id or
-                      self._get_lb_security_group(load_balancer_id).get('id'))
+                      self._get_lb_security_group(load_balancer_id).get(
+                          constants.ID))
         try:
             self._add_security_group_to_port(sec_grp_id, port_id)
         except base.PortNotFound:
@ -286,10 +279,10 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
         if self.sec_grp_enabled:
             sec_grp = self._get_lb_security_group(vip.load_balancer.id)
             if sec_grp:
-                sec_grp_id = sec_grp.get('id')
+                sec_grp_id = sec_grp.get(constants.ID)
                 LOG.info(
                     "Removing security group %(sg)s from port %(port)s",
-                    {'sg': sec_grp_id, 'port': vip.port_id})
+                    {'sg': sec_grp_id, constants.PORT: vip.port_id})
                 raw_port = None
                 try:
                     if port:
@ -300,10 +293,11 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
                               'group.', port.id)
                 if raw_port:
                     sec_grps = raw_port.get(
-                        'port', {}).get('security_groups', [])
+                        constants.PORT, {}).get(constants.SECURITY_GROUPS, [])
                     if sec_grp_id in sec_grps:
                         sec_grps.remove(sec_grp_id)
-                        port_update = {'port': {'security_groups': sec_grps}}
+                        port_update = {constants.PORT: {
+                            constants.SECURITY_GROUPS: sec_grps}}
                         try:
                             self.neutron_client.update_port(port.id,
                                                             port_update)
@ -323,7 +317,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
                             'pass: %s', sec_grp_id)
                 extra_ports = self._get_ports_by_security_group(sec_grp_id)
                 for extra_port in extra_ports:
-                    port_id = extra_port.get('id')
+                    port_id = extra_port.get(constants.ID)
                     try:
                         LOG.warning('Deleting extra port %s on security '
                                     'group %s...', port_id, sec_grp_id)
@ -376,7 +370,17 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):

     def update_vip_sg(self, load_balancer, vip):
         if self.sec_grp_enabled:
-            self._update_vip_security_group(load_balancer, vip)
+            sec_grp = self._get_lb_security_group(load_balancer.id)
+            if not sec_grp:
+                sec_grp_name = common_utils.get_vip_security_group_name(
+                    load_balancer.id)
+                sec_grp = self._create_security_group(sec_grp_name)
+            self._update_security_group_rules(load_balancer,
+                                              sec_grp.get(constants.ID))
+            self._add_vip_security_group_to_port(load_balancer.id, vip.port_id,
+                                                 sec_grp.get(constants.ID))
+            return sec_grp.get(constants.ID)
+        return None

     def plug_aap_port(self, load_balancer, vip, amphora, subnet):
         interface = self._get_plugged_interface(
@ -415,18 +419,78 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
                 amphora, subnet))
         return plugged_amphorae

+    def _validate_fixed_ip(self, fixed_ips, subnet_id, ip_address):
+        """Validate an IP address exists in a fixed_ips list
+
+        :param fixed_ips: A port fixed_ips list
+        :param subnet_id: The subnet that should contain the IP
+        :param ip_address: The IP address to validate
+        :returns: True if the ip address is in the list, False if not
+        """
+        for fixed_ip in fixed_ips:
+            normalized_fixed_ip = ipaddress.ip_address(
+                fixed_ip.ip_address).compressed
+            normalized_ip = ipaddress.ip_address(ip_address).compressed
+            if (fixed_ip.subnet_id == subnet_id and
+                    normalized_fixed_ip == normalized_ip):
+                return True
+        return False
+
+    @staticmethod
+    def _fixed_ips_to_list_of_dicts(fixed_ips):
+        list_of_dicts = []
+        for fixed_ip in fixed_ips:
+            list_of_dicts.append(fixed_ip.to_dict())
+        return list_of_dicts
+
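For illustration, standard-library behavior showing why `_validate_fixed_ip` compares the `.compressed` forms rather than raw strings: equivalent IPv6 spellings differ textually but normalize to one canonical form.

    import ipaddress

    # Raw string comparison fails for equivalent addresses:
    print('2001:0DB8:0:0:0:0:0:0001' == '2001:db8::1')  # False
    # Normalized comparison succeeds:
    print(ipaddress.ip_address('2001:0DB8:0:0:0:0:0:0001').compressed ==
          ipaddress.ip_address('2001:db8::1').compressed)  # True
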
     def allocate_vip(self, load_balancer):
         if load_balancer.vip.port_id:
-            LOG.info('Port %s already exists. Nothing to be done.',
-                     load_balancer.vip.port_id)
-            port = self.get_port(load_balancer.vip.port_id)
-            return self._port_to_vip(port, load_balancer)
+            try:
+                port = self.get_port(load_balancer.vip.port_id)
+                fixed_ip_found = self._validate_fixed_ip(
+                    port.fixed_ips, load_balancer.vip.subnet_id,
+                    load_balancer.vip.ip_address)
+                if (port.network_id == load_balancer.vip.network_id and
+                        fixed_ip_found):
+                    LOG.info('Port %s already exists. Nothing to be done.',
+                             load_balancer.vip.port_id)
+                    return self._port_to_vip(port, load_balancer)
+                LOG.error('Neutron VIP mis-match. Expected ip %s on '
+                          'subnet %s in network %s. Neutron has fixed_ips %s '
+                          'in network %s. Deleting and recreating the VIP '
+                          'port.', load_balancer.vip.ip_address,
+                          load_balancer.vip.subnet_id,
+                          load_balancer.vip.network_id,
+                          self._fixed_ips_to_list_of_dicts(port.fixed_ips),
+                          port.network_id)
+                if load_balancer.vip.octavia_owned:
+                    self.delete_port(load_balancer.vip.port_id)
+                else:
+                    raise base.AllocateVIPException(
+                        'VIP port {0} is broken, but is owned by project {1} '
+                        'so will not be recreated. Aborting VIP allocation.'
+                        .format(port.id, port.project_id))
+            except base.AllocateVIPException as e:
+                # Catch this explicitly because otherwise we blame Neutron
+                LOG.error(getattr(e, constants.MESSAGE, None))
+                raise
+            except base.PortNotFound:
+                LOG.warning('VIP port %s is missing from neutron. Rebuilding.',
+                            load_balancer.vip.port_id)
+            except Exception as e:
+                message = _('Neutron is failing to service requests due to: '
+                            '{}. Aborting.').format(str(e))
+                LOG.error(message)
+                raise base.AllocateVIPException(
+                    message,
+                    orig_msg=getattr(e, constants.MESSAGE, None),
+                    orig_code=getattr(e, constants.STATUS_CODE, None),)

         fixed_ip = {}
         if load_balancer.vip.subnet_id:
             fixed_ip['subnet_id'] = load_balancer.vip.subnet_id
         if load_balancer.vip.ip_address:
-            fixed_ip['ip_address'] = load_balancer.vip.ip_address
+            fixed_ip[constants.IP_ADDRESS] = load_balancer.vip.ip_address

         # Make sure we are backward compatible with older neutron
         if self._check_extension_enabled(PROJECT_ID_ALIAS):
@ -435,29 +499,30 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
             project_id_key = 'tenant_id'

         # It can be assumed that network_id exists
-        port = {'port': {'name': 'octavia-lb-' + load_balancer.id,
-                         'network_id': load_balancer.vip.network_id,
-                         'admin_state_up': False,
-                         'device_id': 'lb-{0}'.format(load_balancer.id),
-                         'device_owner': OCTAVIA_OWNER,
-                         project_id_key: load_balancer.project_id}}
+        port = {constants.PORT: {
+            constants.NAME: 'octavia-lb-' + load_balancer.id,
+            constants.NETWORK_ID: load_balancer.vip.network_id,
+            constants.ADMIN_STATE_UP: False,
+            'device_id': 'lb-{0}'.format(load_balancer.id),
+            constants.DEVICE_OWNER: OCTAVIA_OWNER,
+            project_id_key: load_balancer.project_id}}

         if fixed_ip:
-            port['port']['fixed_ips'] = [fixed_ip]
+            port[constants.PORT][constants.FIXED_IPS] = [fixed_ip]
         try:
             new_port = self.neutron_client.create_port(port)
         except Exception as e:
             message = _('Error creating neutron port on network '
-                        '{network_id}.').format(
-                network_id=load_balancer.vip.network_id)
+                        '{network_id} due to {e}.').format(
+                network_id=load_balancer.vip.network_id, e=str(e))
             LOG.exception(message)
             raise base.AllocateVIPException(
                 message,
-                orig_msg=getattr(e, 'message', None),
-                orig_code=getattr(e, 'status_code', None),
+                orig_msg=getattr(e, constants.MESSAGE, None),
+                orig_code=getattr(e, constants.STATUS_CODE, None),
             )
         new_port = utils.convert_port_dict_to_model(new_port)
-        return self._port_to_vip(new_port, load_balancer)
+        return self._port_to_vip(new_port, load_balancer, octavia_owned=True)

     def unplug_aap_port(self, vip, amphora, subnet):
         interface = self._get_plugged_interface(
@ -473,8 +538,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
         except Exception:
             pass
         try:
-            aap_update = {'port': {
-                'allowed_address_pairs': []
+            aap_update = {constants.PORT: {
+                constants.ALLOWED_ADDRESS_PAIRS: []
             }}
             self.neutron_client.update_port(interface.port_id,
                                             aap_update)
@ -495,8 +560,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
             pass
         except Exception as e:
             LOG.error('Failed to delete port. Resources may still be in '
-                      'use for port: %(port)s due to error: %s(except)s',
-                      {'port': amphora.vrrp_port_id, 'except': e})
+                      'use for port: %(port)s due to error: %(except)s',
+                      {constants.PORT: amphora.vrrp_port_id, 'except': e})

     def unplug_vip(self, load_balancer, vip):
         try:
@ -516,7 +581,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
             interface = self.compute.attach_network_or_port(
                 compute_id=compute_id, network_id=network_id,
                 ip_address=ip_address)
-        except nova_client_exceptions.NotFound as e:
+        except exceptions.NotFound as e:
             if 'Instance' in str(e):
                 raise base.AmphoraNotFound(str(e))
             if 'Network' in str(e):
@ -548,7 +613,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
     def update_vip(self, load_balancer, for_delete=False):
         sec_grp = self._get_lb_security_group(load_balancer.id)
         if sec_grp:
-            self._update_security_group_rules(load_balancer, sec_grp.get('id'))
+            self._update_security_group_rules(load_balancer,
+                                              sec_grp.get(constants.ID))
         elif not for_delete:
             raise exceptions.MissingVIPSecurityGroup(lb_id=load_balancer.id)
         else:
@ -577,8 +643,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):

         for port in ports:
             try:
-                self.neutron_client.update_port(port.id,
-                                                {'port': {'dns_name': ''}})
+                self.neutron_client.update_port(
+                    port.id, {constants.PORT: {'dns_name': ''}})

             except (neutron_client_exceptions.NotFound,
                     neutron_client_exceptions.PortNotFoundClient):
@ -591,7 +657,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
                 ip_address=None, port_id=port.id)
             plugged_interface = self._nova_interface_to_octavia_interface(
                 amphora.compute_id, interface)
-        except nova_client_exceptions.NotFound as e:
+        except exceptions.NotFound as e:
             if 'Instance' in str(e):
                 raise base.AmphoraNotFound(str(e))
             if 'Network' in str(e):
@ -650,6 +716,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
                 vip_subnet, vip_port)
         return amp_configs

+    # TODO(johnsom) This may be dead code now. Remove in failover for v2 patch
     def wait_for_port_detach(self, amphora):
         """Waits for the amphora ports device_id to be unset.

@ -679,14 +746,14 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
         for port in ports:
             try:
                 neutron_port = self.neutron_client.show_port(
-                    port.id).get('port')
+                    port.id).get(constants.PORT)
                 device_id = neutron_port['device_id']
                 start = int(time.time())

                 while device_id:
                     time.sleep(CONF.networking.retry_interval)
                     neutron_port = self.neutron_client.show_port(
-                        port.id).get('port')
+                        port.id).get(constants.PORT)
                     device_id = neutron_port['device_id']

                     timed_out = int(time.time()) - start >= port_detach_timeout
@ -700,3 +767,106 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
             except (neutron_client_exceptions.NotFound,
                     neutron_client_exceptions.PortNotFoundClient):
                 pass

+    def delete_port(self, port_id):
+        """Delete a neutron port.
+
+        :param port_id: The port ID to delete.
+        :returns: None
+        """
+        try:
+            self.neutron_client.delete_port(port_id)
+        except (neutron_client_exceptions.NotFound,
+                neutron_client_exceptions.PortNotFoundClient):
+            LOG.debug('VIP instance port %s already deleted. Skipping.',
+                      port_id)
+        except Exception as e:
+            raise exceptions.NetworkServiceError(net_error=str(e))
+
+    def set_port_admin_state_up(self, port_id, state):
+        """Set the admin state of a port. True is up, False is down.
+
+        :param port_id: The port ID to update.
+        :param state: True for up, False for down.
+        :returns: None
+        """
+        try:
+            self.neutron_client.update_port(
+                port_id, {constants.PORT: {constants.ADMIN_STATE_UP: state}})
+        except (neutron_client_exceptions.NotFound,
+                neutron_client_exceptions.PortNotFoundClient) as e:
+            raise base.PortNotFound(str(e))
+        except Exception as e:
+            raise exceptions.NetworkServiceError(net_error=str(e))
+
+    def create_port(self, network_id, name=None, fixed_ips=(),
+                    secondary_ips=(), security_group_ids=(),
+                    admin_state_up=True, qos_policy_id=None):
+        """Creates a network port.
+
+        fixed_ips = [{'subnet_id': <id>, ('ip_address': <IP>)},]
+        ip_address is optional in the fixed_ips dictionary.
+
+        :param network_id: The network the port should be created on.
+        :param name: The name to apply to the port.
+        :param fixed_ips: A list of fixed IP dicts.
+        :param secondary_ips: A list of secondary IPs to add to the port.
+        :param security_group_ids: A list of security group IDs for the port.
+        :param qos_policy_id: The QoS policy ID to apply to the port.
+        :returns port: A port data model object.
+        """
+        try:
+            aap_list = []
+            for ip in secondary_ips:
+                aap_list.append({constants.IP_ADDRESS: ip})
+            port = {constants.NETWORK_ID: network_id,
+                    constants.ADMIN_STATE_UP: admin_state_up,
+                    constants.DEVICE_OWNER: OCTAVIA_OWNER}
+            if aap_list:
+                port[constants.ALLOWED_ADDRESS_PAIRS] = aap_list
+            if fixed_ips:
+                port[constants.FIXED_IPS] = fixed_ips
+            if name:
+                port[constants.NAME] = name
+            if qos_policy_id:
+                port[constants.QOS_POLICY_ID] = qos_policy_id
+            if security_group_ids:
+                port[constants.SECURITY_GROUPS] = security_group_ids
+
+            new_port = self.neutron_client.create_port({constants.PORT: port})
+
+            LOG.debug('Created port: %(port)s', {constants.PORT: new_port})
+
+            return utils.convert_port_dict_to_model(new_port)
+        except Exception as e:
+            message = _('Error creating a port on network '
+                        '{network_id} due to {error}.').format(
+                network_id=network_id, error=str(e))
+            LOG.exception(message)
+            raise base.CreatePortException(message)
+
+    def get_security_group(self, sg_name):
+        """Retrieves the security group by its name.
+
+        :param sg_name: The security group name.
+        :return: octavia.network.data_models.SecurityGroup, None if not enabled
+        :raises: NetworkException, SecurityGroupNotFound
+        """
+        try:
+            if self.sec_grp_enabled and sg_name:
+                sec_grps = self.neutron_client.list_security_groups(
+                    name=sg_name)
+                if sec_grps and sec_grps.get(constants.SECURITY_GROUPS):
+                    sg_dict = sec_grps.get(constants.SECURITY_GROUPS)[0]
+                    return utils.convert_security_group_dict_to_model(sg_dict)
+                message = _('Security group {name} not found.').format(
+                    name=sg_name)
+                raise base.SecurityGroupNotFound(message)
+            return None
+        except base.SecurityGroupNotFound:
+            raise
+        except Exception as e:
+            message = _('Error when getting security group {name} due to '
+                        '{error}').format(name=sg_name, error=str(e))
+            LOG.exception(message)
+            raise base.NetworkException(message)
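A hedged usage sketch of the new `create_port` driver API (all IDs and addresses invented, `network_driver` standing in for an AllowedAddressPairsDriver instance), mirroring the call the CreateVIPBasePort task makes:

    # Sketch only; IDs below are placeholders, not real cloud resources.
    port = network_driver.create_port(
        network_id='11111111-2222-3333-4444-555555555555',
        name='octavia-lb-vrrp-<amphora-id>',
        fixed_ips=[{'subnet_id': '66666666-7777-8888-9999-000000000000'}],
        secondary_ips=['203.0.113.10'],  # becomes allowed_address_pairs
        security_group_ids=['<vip-sg-id>'],
        qos_policy_id=None)
    print(port.id, port.fixed_ips[0].ip_address)
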
@ -71,18 +71,26 @@ class BaseNeutronDriver(base.AbstractNetworkDriver):
             self._check_extension_cache[extension_alias] = False
         return self._check_extension_cache[extension_alias]

-    def _port_to_vip(self, port, load_balancer):
+    def _port_to_vip(self, port, load_balancer, octavia_owned=False):
         fixed_ip = None
         for port_fixed_ip in port.fixed_ips:
             if port_fixed_ip.subnet_id == load_balancer.vip.subnet_id:
                 fixed_ip = port_fixed_ip
                 break
-        return data_models.Vip(ip_address=fixed_ip.ip_address,
-                               subnet_id=fixed_ip.subnet_id,
+        if fixed_ip:
+            return data_models.Vip(ip_address=fixed_ip.ip_address,
+                                   subnet_id=fixed_ip.subnet_id,
+                                   network_id=port.network_id,
+                                   port_id=port.id,
+                                   load_balancer=load_balancer,
+                                   load_balancer_id=load_balancer.id,
+                                   octavia_owned=octavia_owned)
+        return data_models.Vip(ip_address=None, subnet_id=None,
                                network_id=port.network_id,
                                port_id=port.id,
                                load_balancer=load_balancer,
-                               load_balancer_id=load_balancer.id)
+                               load_balancer_id=load_balancer.id,
+                               octavia_owned=octavia_owned)

     def _nova_interface_to_octavia_interface(self, compute_id, nova_interface):
         fixed_ips = [utils.convert_fixed_ip_dict_to_model(fixed_ip)
@ -112,6 +120,7 @@ class BaseNeutronDriver(base.AbstractNetworkDriver):

     def _add_security_group_to_port(self, sec_grp_id, port_id):
         port_update = {'port': {'security_groups': [sec_grp_id]}}
+        # Note: Neutron accepts the SG even if it already exists
         try:
             self.neutron_client.update_port(port_id, port_update)
         except neutron_client_exceptions.PortNotFoundClient as e:
@ -13,6 +13,7 @@
 # under the License.


+from octavia.common import constants
 from octavia.network import data_models as network_models


@ -22,9 +23,10 @@ def convert_subnet_dict_to_model(subnet_dict):
     host_routes = [network_models.HostRoute(nexthop=hr.get('nexthop'),
                                             destination=hr.get('destination'))
                    for hr in subnet_hrs]
-    return network_models.Subnet(id=subnet.get('id'), name=subnet.get('name'),
+    return network_models.Subnet(id=subnet.get(constants.ID),
+                                 name=subnet.get(constants.NAME),
                                  network_id=subnet.get('network_id'),
-                                 project_id=subnet.get('tenant_id'),
+                                 project_id=subnet.get(constants.TENANT_ID),
                                  gateway_ip=subnet.get('gateway_ip'),
                                  cidr=subnet.get('cidr'),
                                  ip_version=subnet.get('ip_version'),
@ -38,27 +40,28 @@ def convert_port_dict_to_model(port_dict):
                                         ip_address=fixed_ip.get('ip_address'))
                  for fixed_ip in port.get('fixed_ips', [])]
     return network_models.Port(
-        id=port.get('id'),
-        name=port.get('name'),
+        id=port.get(constants.ID),
+        name=port.get(constants.NAME),
         device_id=port.get('device_id'),
         device_owner=port.get('device_owner'),
         mac_address=port.get('mac_address'),
         network_id=port.get('network_id'),
         status=port.get('status'),
-        project_id=port.get('tenant_id'),
+        project_id=port.get(constants.TENANT_ID),
         admin_state_up=port.get('admin_state_up'),
         fixed_ips=fixed_ips,
-        qos_policy_id=port.get('qos_policy_id')
+        qos_policy_id=port.get('qos_policy_id'),
+        security_group_ids=port.get(constants.SECURITY_GROUPS, [])
     )


 def convert_network_dict_to_model(network_dict):
     nw = network_dict.get('network', network_dict)
     return network_models.Network(
-        id=nw.get('id'),
-        name=nw.get('name'),
+        id=nw.get(constants.ID),
+        name=nw.get(constants.NAME),
         subnets=nw.get('subnets'),
-        project_id=nw.get('tenant_id'),
+        project_id=nw.get(constants.TENANT_ID),
         admin_state_up=nw.get('admin_state_up'),
         mtu=nw.get('mtu'),
         provider_network_type=nw.get('provider:network_type'),
@ -76,16 +79,17 @@ def convert_fixed_ip_dict_to_model(fixed_ip_dict):

 def convert_qos_policy_dict_to_model(qos_policy_dict):
     qos_policy = qos_policy_dict.get('policy', qos_policy_dict)
-    return network_models.QosPolicy(id=qos_policy.get('id'))
+    return network_models.QosPolicy(id=qos_policy.get(constants.ID))


 # We can't use "floating_ip" because we need to match the neutron client method
 def convert_floatingip_dict_to_model(floating_ip_dict):
     floating_ip = floating_ip_dict.get('floatingip', floating_ip_dict)
     return network_models.FloatingIP(
-        id=floating_ip.get('id'),
-        description=floating_ip.get('description'),
-        project_id=floating_ip.get('project_id', floating_ip.get('tenant_id')),
+        id=floating_ip.get(constants.ID),
+        description=floating_ip.get(constants.DESCRIPTION),
+        project_id=floating_ip.get(constants.PROJECT_ID,
+                                   floating_ip.get(constants.TENANT_ID)),
         status=floating_ip.get('status'),
         router_id=floating_ip.get('router_id'),
         port_id=floating_ip.get('port_id'),
@ -103,3 +107,18 @@ def convert_network_ip_availability_dict_to_model(
     ip_avail = network_models.Network_IP_Availability.from_dict(nw_ip_avail)
     ip_avail.subnet_ip_availability = nw_ip_avail.get('subnet_ip_availability')
     return ip_avail


+def convert_security_group_dict_to_model(security_group_dict):
+    sg_rule_ids = [rule.get(constants.ID) for rule in
+                   security_group_dict.get(constants.SECURITY_GROUP_RULES, [])]
+    return network_models.SecurityGroup(
+        id=security_group_dict.get(constants.ID),
+        project_id=security_group_dict.get(
+            constants.PROJECT_ID,
+            security_group_dict.get(constants.TENANT_ID)),
+        name=security_group_dict.get(constants.NAME),
+        description=security_group_dict.get(constants.DESCRIPTION),
+        security_group_rule_ids=sg_rule_ids,
+        tags=security_group_dict.get(constants.TAGS, []),
+        stateful=security_group_dict.get('stateful'))
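For illustration, a minimal security group dict (keys abbreviated from the neutron response format; assuming the constants above map to those key strings) run through the new converter:

    sg_dict = {'id': 'sg-uuid', 'project_id': 'proj-uuid',
               'name': 'lb-<lb-id>', 'description': '',
               'security_group_rules': [{'id': 'rule-1'}],
               'tags': [], 'stateful': True}
    sg = convert_security_group_dict_to_model(sg_dict)
    print(sg.id, sg.security_group_rule_ids)  # -> sg-uuid ['rule-1']
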
@ -204,6 +204,12 @@ class NoopManager(object):
             network_id, device_id, 'get_port_by_net_id_device_id')
         return network_models.Port(id=uuidutils.generate_uuid())

+    def get_security_group(self, sg_name):
+        LOG.debug("Network %s no-op, get_security_group name %s",
+                  self.__class__.__name__, sg_name)
+        self.networkconfigconfig[(sg_name)] = (sg_name, 'get_security_group')
+        return network_models.SecurityGroup(id=uuidutils.generate_uuid())
+
     def failover_preparation(self, amphora):
         LOG.debug("failover %s no-op, failover_preparation, amphora id %s",
                   self.__class__.__name__, amphora.id)
@ -282,6 +288,53 @@ class NoopManager(object):
         ip_avail.subnet_ip_availability = subnet_ip_availability
         return ip_avail

+    def delete_port(self, port_id):
+        LOG.debug("Network %s no-op, delete_port port_id %s",
+                  self.__class__.__name__, port_id)
+        self.networkconfigconfig[port_id] = (port_id, 'delete_port')
+
+    def set_port_admin_state_up(self, port_id, state):
+        LOG.debug("Network %s no-op, set_port_admin_state_up port_id %s, "
+                  "state %s", self.__class__.__name__, port_id, state)
+        self.networkconfigconfig[(port_id, state)] = (port_id, state,
+                                                      'admin_down_port')
+
+    def create_port(self, network_id, name=None, fixed_ips=(),
+                    secondary_ips=(), security_group_ids=(),
+                    admin_state_up=True, qos_policy_id=None):
+        LOG.debug("Network %s no-op, create_port network_id %s",
+                  self.__class__.__name__, network_id)
+        if not name:
+            name = 'no-op-port'
+        port_id = uuidutils.generate_uuid()
+        project_id = uuidutils.generate_uuid()
+
+        fixed_ip_obj_list = []
+        for fixed_ip in fixed_ips:
+            if fixed_ip and not fixed_ip.get('ip_address'):
+                fixed_ip_obj_list.append(
+                    network_models.FixedIP(subnet_id=fixed_ip.get('subnet_id'),
+                                           ip_address='198.51.100.56'))
+            else:
+                fixed_ip_obj_list.append(
+                    network_models.FixedIP(
+                        subnet_id=fixed_ip.get('subnet_id'),
+                        ip_address=fixed_ip.get('ip_address')))
+        if not fixed_ip_obj_list:
+            fixed_ip_obj_list = [network_models.FixedIP(
+                subnet_id=uuidutils.generate_uuid(),
+                ip_address='198.51.100.56')]
+
+        self.networkconfigconfig[(network_id, 'create_port')] = (
+            network_id, name, fixed_ip_obj_list, secondary_ips,
+            security_group_ids, admin_state_up, qos_policy_id)
+        return network_models.Port(
+            id=port_id, name=name, device_id='no-op-device-id',
+            device_owner='Octavia', mac_address='00:00:5E:00:53:05',
+            network_id=network_id, status='UP', project_id=project_id,
+            admin_state_up=admin_state_up, fixed_ips=fixed_ip_obj_list,
+            qos_policy_id=qos_policy_id, security_group_ids=security_group_ids)
+

 class NoopNetworkDriver(driver_base.AbstractNetworkDriver):
     def __init__(self):
@ -337,6 +390,9 @@ class NoopNetworkDriver(driver_base.AbstractNetworkDriver):
     def get_port_by_net_id_device_id(self, network_id, device_id):
         return self.driver.get_port_by_net_id_device_id(network_id, device_id)

+    def get_security_group(self, sg_name):
+        return self.driver.get_security_group(sg_name)
+
     def failover_preparation(self, amphora):
         self.driver.failover_preparation(amphora)

@ -366,3 +422,16 @@ class NoopNetworkDriver(driver_base.AbstractNetworkDriver):

     def get_network_ip_availability(self, network):
         return self.driver.get_network_ip_availability(network)

+    def delete_port(self, port_id):
+        self.driver.delete_port(port_id)
+
+    def set_port_admin_state_up(self, port_id, state):
+        self.driver.set_port_admin_state_up(port_id, state)
+
+    def create_port(self, network_id, name=None, fixed_ips=(),
+                    secondary_ips=(), security_group_ids=(),
+                    admin_state_up=True, qos_policy_id=None):
+        return self.driver.create_port(
+            network_id, name, fixed_ips, secondary_ips, security_group_ids,
+            admin_state_up, qos_policy_id)
@ -28,6 +28,7 @@ def list_opts():
             itertools.chain(octavia.common.config.core_opts)),
         ('api_settings', octavia.common.config.api_opts),
         ('amphora_agent', octavia.common.config.amphora_agent_opts),
+        ('compute', octavia.common.config.compute_opts),
         ('networking', octavia.common.config.networking_opts),
         ('oslo_messaging', octavia.common.config.oslo_messaging_opts),
         ('haproxy_amphora', octavia.common.config.haproxy_amphora_opts),
@ -67,6 +67,52 @@ MOCK_DEVICE_ID2 = 'Moctavia124'
 MOCK_SECURITY_GROUP_ID = 'security-group-1'
 MOCK_SECURITY_GROUP_NAME = 'SecurityGroup1'

+MOCK_SECURITY_GROUP = {
+    "id": MOCK_SECURITY_GROUP_ID,
+    "name": MOCK_SECURITY_GROUP_NAME,
+    "tenant_id": MOCK_PROJECT_ID,
+    "description": "",
+    "security_group_rules": [{
+        "id": "85f1c72b-cdd4-484f-a9c8-b3205f4e6f53",
+        "tenant_id": MOCK_PROJECT_ID,
+        "security_group_id": MOCK_SECURITY_GROUP_ID,
+        "ethertype": "IPv4",
+        "direction": "ingress",
+        "protocol": "tcp",
+        "port_range_min": 80,
+        "port_range_max": 80,
+        "remote_ip_prefix": None,
+        "remote_group_id": None,
+        "description": "",
+        "tags": [],
+        "created_at": "2020-03-12T20:44:48Z",
+        "updated_at": "2020-03-12T20:44:48Z",
+        "revision_number": 0,
+        "project_id": MOCK_PROJECT_ID
+    }, {
+        "id": "aa16ae5f-eac2-40b5-994b-5169a06228a4",
+        "tenant_id": MOCK_PROJECT_ID,
+        "security_group_id": "6530d536-3083-4d5c-a4a9-272ac7b8f3de",
+        "ethertype": "IPv4",
+        "direction": "egress",
+        "protocol": None,
+        "port_range_min": None,
+        "port_range_max": None,
+        "remote_ip_prefix": None,
+        "remote_group_id": None,
+        "description": None,
+        "tags": [],
+        "created_at": "2020-03-12T20:43:31Z",
+        "updated_at": "2020-03-12T20:43:31Z",
+        "revision_number": 0,
+        "project_id": MOCK_PROJECT_ID,
+    }],
+    "tags": [],
+    "created_at": "2020-03-12T20:43:31Z",
+    "updated_at": "2020-03-12T20:44:48Z",
+    "revision_number": 3,
+    "project_id": MOCK_PROJECT_ID}
+
 MOCK_ADMIN_STATE_UP = True
 MOCK_STATUS = 'ACTIVE'
 MOCK_MTU = 1500
@ -27,7 +27,8 @@ def generate_load_balancer_tree():
 LB_SEED = 0


-def generate_load_balancer(vip=None, amphorae=None):
+def generate_load_balancer(vip=None, amphorae=None,
+                           topology=constants.TOPOLOGY_SINGLE):
     amphorae = amphorae or []
     global LB_SEED
     LB_SEED += 1
@ -36,6 +37,7 @@ def generate_load_balancer(vip=None, amphorae=None):
         name='lb{0}'.format(LB_SEED),
         description='lb{0}'.format(LB_SEED),
         vip=vip,
+        topology=topology,
         amphorae=amphorae)
     for amp in lb.amphorae:
         amp.load_balancer = lb
@ -578,14 +578,16 @@ class SampleDriverDataModels(object):
             constants.NETWORK_ID: self.network_id,
             constants.PORT_ID: self.port_id,
             lib_consts.SUBNET_ID: self.subnet_id,
-            constants.QOS_POLICY_ID: self.qos_policy_id}
+            constants.QOS_POLICY_ID: self.qos_policy_id,
+            constants.OCTAVIA_OWNED: None}

         self.provider_vip_dict = {
             lib_consts.VIP_ADDRESS: self.ip_address,
             lib_consts.VIP_NETWORK_ID: self.network_id,
             lib_consts.VIP_PORT_ID: self.port_id,
             lib_consts.VIP_SUBNET_ID: self.subnet_id,
-            lib_consts.VIP_QOS_POLICY_ID: self.qos_policy_id}
+            lib_consts.VIP_QOS_POLICY_ID: self.qos_policy_id,
+            constants.OCTAVIA_OWNED: None}

         self.db_vip = data_models.Vip(
             ip_address=self.ip_address,
octavia/tests/common/sample_network_data.py (new file, 198 lines)
@@ -0,0 +1,198 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import collections
+
+
+def create_iproute_ipv4_address(ip_address, broadcast_address, interface_name):
+    """Returns a netlink/iproute (pyroute2) IPv4 address."""
+    Stats = collections.namedtuple('Stats', ('qsize', 'delta', 'delay'))
+    return (
+        {'family': 2, 'prefixlen': 24, 'flags': 0, 'scope': 0, 'index': 2,
+         'attrs': [('IFA_ADDRESS', ip_address), ('IFA_LOCAL', ip_address),
+                   ('IFA_BROADCAST', broadcast_address),
+                   ('IFA_LABEL', interface_name), ('IFA_FLAGS', 0),
+                   ('IFA_CACHEINFO', {'ifa_preferred': 49256,
+                                      'ifa_valid': 49256, 'cstamp': 1961,
+                                      'tstamp': 73441020})],
+         'header': {'length': 88, 'type': 20, 'flags': 2,
+                    'sequence_number': 258, 'pid': 7590, 'error': None,
+                    'stats': Stats(qsize=0, delta=0, delay=0)},
+         'event': 'RTM_NEWADDR'},)
+
+
+def create_iproute_ipv6_address(ip_address, interface_name):
+    """Returns a netlink/iproute (pyroute2) IPv6 address."""
+    Stats = collections.namedtuple('Stats', ('qsize', 'delta', 'delay'))
+    return (
+        {'family': 10, 'prefixlen': 64, 'flags': 0, 'scope': 0, 'index': 2,
+         'attrs': [('IFA_CACHEINFO', {'ifa_preferred': 604503,
+                                      'ifa_valid': 2591703, 'cstamp': 2038,
+                                      'tstamp': 77073215}),
+                   ('IFA_ADDRESS', '2001:db8:ffff:ffff:ffff:ffff:ffff:ffff'),
+                   ('IFA_FLAGS', 768)],
+         'header': {'length': 72, 'type': 20, 'flags': 2,
+                    'sequence_number': 257, 'pid': 7590, 'error': None,
+                    'stats': Stats(qsize=0, delta=0, delay=0)},
+         'event': 'RTM_NEWADDR'},
+        {'family': 10, 'prefixlen': 64, 'flags': 0, 'scope': 0, 'index': 2,
+         'attrs': [('IFA_CACHEINFO', {'ifa_preferred': 604503,
+                                      'ifa_valid': 2591703, 'cstamp': 2038,
+                                      'tstamp': 77073215}),
+                   ('IFA_ADDRESS', ip_address), ('IFA_FLAGS', 768)],
+         'header': {'length': 72, 'type': 20, 'flags': 2,
+                    'sequence_number': 257, 'pid': 7590, 'error': None,
+                    'stats': Stats(qsize=0, delta=0, delay=0)},
+         'event': 'RTM_NEWADDR'},)
+
+
+def create_iproute_interface(interface_name):
+    """Returns a netlink/iproute (pyroute2) interface."""
+    Stats = collections.namedtuple('Stats', ('qsize', 'delta', 'delay'))
+    return [{
+        'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699,
+        'change': 0,
+        'attrs': [('IFLA_TXQLEN', 1000), ('IFLA_IFNAME', interface_name),
+                  ('IFLA_OPERSTATE', 'UP'), ('IFLA_LINKMODE', 0),
+                  ('IFLA_MTU', 1500), ('IFLA_GROUP', 0),
+                  ('IFLA_PROMISCUITY', 0), ('IFLA_NUM_TX_QUEUES', 1),
+                  ('IFLA_GSO_MAX_SEGS', 65535),
+                  ('IFLA_GSO_MAX_SIZE', 65536), ('IFLA_NUM_RX_QUEUES', 1),
+                  ('IFLA_CARRIER', 1), ('IFLA_QDISC', 'fq_codel'),
+                  ('IFLA_CARRIER_CHANGES', 2), ('IFLA_PROTO_DOWN', 0),
+                  ('IFLA_CARRIER_UP_COUNT', 1),
+                  ('IFLA_CARRIER_DOWN_COUNT', 1),
+                  ('IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0,
+                                'irq': 0, 'dma': 0, 'port': 0}),
+                  ('IFLA_ADDRESS', '52:54:00:cf:37:9e'),
+                  ('IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'),
+                  ('IFLA_STATS64', {
+                      'rx_packets': 756091, 'tx_packets': 780292,
+                      'rx_bytes': 234846748, 'tx_bytes': 208583687,
+                      'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0,
+                      'tx_dropped': 0, 'multicast': 0, 'collisions': 0,
+                      'rx_length_errors': 0, 'rx_over_errors': 0,
+                      'rx_crc_errors': 0, 'rx_frame_errors': 0,
+                      'rx_fifo_errors': 0, 'rx_missed_errors': 0,
+                      'tx_aborted_errors': 0, 'tx_carrier_errors': 0,
+                      'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0,
+                      'tx_window_errors': 0, 'rx_compressed': 0,
+                      'tx_compressed': 0}),
+                  ('IFLA_STATS', {
+                      'rx_packets': 756091, 'tx_packets': 780292,
+                      'rx_bytes': 234846748, 'tx_bytes': 208583687,
+                      'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0,
+                      'tx_dropped': 0, 'multicast': 0, 'collisions': 0,
+                      'rx_length_errors': 0, 'rx_over_errors': 0,
+                      'rx_crc_errors': 0, 'rx_frame_errors': 0,
+                      'rx_fifo_errors': 0, 'rx_missed_errors': 0,
+                      'tx_aborted_errors': 0, 'tx_carrier_errors': 0,
+                      'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0,
+                      'tx_window_errors': 0, 'rx_compressed': 0,
+                      'tx_compressed': 0}),
+                  ('IFLA_XDP', '05:00:02:00:00:00:00:00'),
+                  ('IFLA_AF_SPEC', {
+                      'attrs': [
+                          ('AF_INET', {
+                              'dummy': 65664, 'forwarding': 1,
+                              'mc_forwarding': 0, 'proxy_arp': 0,
+                              'accept_redirects': 1,
+                              'secure_redirects': 1,
+                              'send_redirects': 1, 'shared_media': 1,
+                              'rp_filter': 1, 'accept_source_route': 1,
+                              'bootp_relay': 0, 'log_martians': 0,
+                              'tag': 0, 'arpfilter': 0, 'medium_id': 0,
+                              'noxfrm': 0, 'nopolicy': 0,
+                              'force_igmp_version': 0, 'arp_announce': 0,
+                              'arp_ignore': 0, 'promote_secondaries': 0,
+                              'arp_accept': 0, 'arp_notify': 0,
+                              'accept_local': 0, 'src_vmark': 0,
+                              'proxy_arp_pvlan': 0, 'route_localnet': 0,
+                              'igmpv2_unsolicited_report_interval': 10000,
+                              'igmpv3_unsolicited_report_interval': 1000}),
+                          ('AF_INET6', {
+                              'attrs': [('IFLA_INET6_FLAGS', 2147483648),
+                                        ('IFLA_INET6_CACHEINFO', {
+                                            'max_reasm_len': 65535,
+                                            'tstamp': 1859,
+                                            'reachable_time': 30708,
+                                            'retrans_time': 1000}),
+                                        ('IFLA_INET6_CONF', {
+                                            'forwarding': 1, 'hop_limit': 64,
+                                            'mtu': 1500, 'accept_ra': 2,
+                                            'accept_redirects': 1,
+                                            'autoconf': 1,
+                                            'dad_transmits': 1,
+                                            'router_solicitations': 4294967295,
+                                            'router_solicitation_interval':
+                                                4000,
+                                            'router_solicitation_delay': 1000,
+                                            'use_tempaddr': 0,
+                                            'temp_valid_lft': 604800,
+                                            'temp_preferred_lft': 86400,
+                                            'regen_max_retry': 3,
+                                            'max_desync_factor': 600,
+                                            'max_addresses': 16,
+                                            'force_mld_version': 0,
+                                            'accept_ra_defrtr': 1,
+                                            'accept_ra_pinfo': 1,
+                                            'accept_ra_rtr_pref': 1,
+                                            'router_probe_interval': 60000,
+                                            'accept_ra_rt_info_max_plen': 0,
+                                            'proxy_ndp': 0,
+                                            'optimistic_dad': 0,
+                                            'accept_source_route': 0,
+                                            'mc_forwarding': 0,
+                                            'disable_ipv6': 0,
+                                            'accept_dad': 1, 'force_tllao': 0,
+                                            'ndisc_notify': 0}),
+                                        ('IFLA_INET6_STATS', {
+                                            'num': 37, 'inpkts': 57817,
+                                            'inoctets': 144065857,
+                                            'indelivers': 36758,
+                                            'outforwdatagrams': 0,
+                                            'outpkts': 35062,
+                                            'outoctets': 4796485,
+                                            'inhdrerrors': 0,
+                                            'intoobigerrors': 0,
+                                            'innoroutes': 0, 'inaddrerrors': 0,
+                                            'inunknownprotos': 0,
+                                            'intruncatedpkts': 0,
+                                            'indiscards': 0,
+                                            'outdiscards': 0, 'outnoroutes': 0,
+                                            'reasmtimeout': 0, 'reasmreqds': 0,
+                                            'reasmoks': 0, 'reasmfails': 0,
+                                            'fragoks': 0, 'fragfails': 0,
+                                            'fragcreates': 0,
+                                            'inmcastpkts': 23214,
+                                            'outmcastpkts': 6546,
+                                            'inbcastpkts': 0,
+                                            'outbcastpkts': 0,
+                                            'inmcastoctets': 2255059,
+                                            'outmcastoctets': 589090,
+                                            'inbcastoctets': 0,
+                                            'outbcastoctets': 0,
+                                            'csumerrors': 0,
+                                            'noectpkts': 57860,
+                                            'ect1pkts': 0, 'ect0pkts': 0,
+                                            'cepkts': 0}),
+                                        ('IFLA_INET6_ICMP6STATS', {
+                                            'num': 6, 'inmsgs': 2337,
+                                            'inerrors': 0, 'outmsgs': 176,
+                                            'outerrors': 0, 'csumerrors': 0}),
+                                        ('IFLA_INET6_TOKEN', '::'),
+                                        ('IFLA_INET6_ADDR_GEN_MODE', 0)]})]})],
+        'header': {'length': 1304, 'type': 16, 'flags': 0,
+                   'sequence_number': 261, 'pid': 7590, 'error': None,
+                   'stats': Stats(qsize=0, delta=0, delay=0)},
+        'state': 'up', 'event': 'RTM_NEWLINK'}]
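These helpers return plain dicts shaped like pyroute2 netlink messages, so consuming test code can scan the attrs lists directly. A minimal sketch (tuple scanning only; real pyroute2 messages also expose get_attr(), which these samples do not implement):

from octavia.tests.common import sample_network_data

addrs = sample_network_data.create_iproute_ipv4_address(
    '203.0.113.10', '203.0.113.255', 'eth1')
# attrs is a list of (name, value) pairs, so dict() gives easy lookups.
attrs = dict(addrs[0]['attrs'])
assert attrs['IFA_ADDRESS'] == '203.0.113.10'
assert attrs['IFA_LABEL'] == 'eth1'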
@@ -19,6 +19,8 @@ from unittest import mock

 import flask

+from oslo_config import cfg
+from oslo_config import fixture as oslo_fixture
 from oslo_utils import uuidutils

 from octavia.amphorae.backends.agent.api_server import keepalivedlvs
@@ -186,10 +188,14 @@ class KeepalivedLvsTestCase(base.TestCase):
             self, m_check_output, m_os_rm, m_os_mkdir, m_exists, m_os_chmod,
             m_os_sysinit, m_copy2, mock_netns, mock_install_netns,
             mock_systemctl):
-        m_exists.side_effect = [False, False, True, True, True, False, False]
+        m_exists.side_effect = [False, False, True, True, False, False, False]
         cfg_path = util.keepalived_lvs_cfg_path(self.FAKE_ID)
         m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open

+        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        conf.config(group='controller_worker',
+                    loadbalancer_topology=consts.TOPOLOGY_ACTIVE_STANDBY)
+
         with mock.patch('os.open') as m_open, mock.patch.object(os,
                                                                 'fdopen',
                                                                 m) as m_fdopen:
@@ -248,10 +254,10 @@ class KeepalivedLvsTestCase(base.TestCase):
     def test_upload_udp_listener_config_start_service_failure(
             self, m_check_output, m_os_rm, m_os_mkdir, m_exists, m_os_chmod,
             m_os_sysinit, m_copy2, mock_install_netns, mock_systemctl):
-        m_exists.side_effect = [False, False, True, True, True, False]
-        m_check_output.side_effect = subprocess.CalledProcessError(1, 'blah!')
+        m_exists.side_effect = [False, False, True, True, False]
         cfg_path = util.keepalived_lvs_cfg_path(self.FAKE_ID)
         m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open
+        mock_systemctl.side_effect = [mock.DEFAULT, Exception('boom')]

         with mock.patch('os.open') as m_open, mock.patch.object(os,
                                                                 'fdopen',
@@ -270,8 +270,8 @@ class TestServerTestCase(base.TestCase):

     @mock.patch('os.listdir')
     @mock.patch('os.path.exists')
-    @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.'
-                'Loadbalancer.vrrp_check_script_update')
+    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
+                'vrrp_check_script_update')
     @mock.patch('subprocess.check_output')
     def _test_start(self, distro, mock_subprocess, mock_vrrp, mock_exists,
                     mock_listdir):
@@ -346,8 +346,8 @@ class TestServerTestCase(base.TestCase):

     @mock.patch('os.listdir')
     @mock.patch('os.path.exists')
-    @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.'
-                'Loadbalancer.vrrp_check_script_update')
+    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
+                'vrrp_check_script_update')
     @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.'
                 'Loadbalancer._check_haproxy_status')
     @mock.patch('subprocess.check_output')
@@ -460,8 +460,8 @@ class TestServerTestCase(base.TestCase):
     @mock.patch('os.listdir')
     @mock.patch('os.path.exists')
     @mock.patch('subprocess.check_output')
-    @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.'
-                'Loadbalancer.vrrp_check_script_update')
+    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
+                'vrrp_check_script_update')
     @mock.patch('octavia.amphorae.backends.agent.api_server.util.' +
                 'get_haproxy_pid')
     @mock.patch('shutil.rmtree')
@@ -2322,6 +2322,8 @@ class TestServerTestCase(base.TestCase):
         self._test_upload_keepalived_config(consts.INIT_SYSVINIT,
                                             consts.UBUNTU, mock_init_system)

+    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
+                'vrrp_check_script_update')
     @mock.patch('os.path.exists')
     @mock.patch('os.makedirs')
     @mock.patch('os.rename')
@@ -2330,7 +2332,8 @@ class TestServerTestCase(base.TestCase):
     def _test_upload_keepalived_config(self, init_system, distro,
                                        mock_init_system, mock_remove,
                                        mock_subprocess, mock_rename,
-                                       mock_makedirs, mock_exists):
+                                       mock_makedirs, mock_exists,
+                                       mock_vrrp_check):

         self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
         flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
@@ -2353,8 +2356,11 @@ class TestServerTestCase(base.TestCase):
             mock_open.assert_called_with(cfg_path, flags, mode)
             mock_fdopen.assert_called_with(123, 'wb')
         self.assertEqual(200, rv.status_code)
+        mock_vrrp_check.assert_called_once_with(None,
+                                                consts.AMP_ACTION_START)

         mock_exists.return_value = False
+        mock_vrrp_check.reset_mock()
         script_path = util.keepalived_check_script_path()
         m = self.useFixture(test_utils.OpenFixture(script_path)).mock_open

@@ -2372,6 +2378,8 @@ class TestServerTestCase(base.TestCase):
             mock_open.assert_called_with(script_path, flags, mode)
             mock_fdopen.assert_called_with(123, 'w')
         self.assertEqual(200, rv.status_code)
+        mock_vrrp_check.assert_called_once_with(None,
+                                                consts.AMP_ACTION_START)

     def test_ubuntu_manage_service_vrrp(self):
         self._test_manage_service_vrrp(consts.UBUNTU)
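The new assertions above pin down the call shape of the relocated helper: uploading a keepalived config now also refreshes the amphora's check script. A sketch of that call (passing None rather than a load balancer ID appears to mean no single LB is being started or stopped, so the script is rebuilt from everything already on the amphora):

from octavia.amphorae.backends.agent.api_server import util
from octavia.common import constants as consts

# Rebuild the keepalived check script for all local load balancers.
util.vrrp_check_script_update(None, consts.AMP_ACTION_START)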
@@ -14,6 +14,8 @@
 import subprocess
 from unittest import mock

+from oslo_config import cfg
+from oslo_config import fixture as oslo_fixture
 from oslo_utils import uuidutils

 from octavia.amphorae.backends.agent.api_server import loadbalancer
@@ -22,6 +24,7 @@ from octavia.common import constants as consts
 from octavia.tests.common import utils as test_utils
 import octavia.tests.unit.base as base

+CONF = cfg.CONF
 LISTENER_ID1 = uuidutils.generate_uuid()
 LB_ID1 = uuidutils.generate_uuid()

@@ -33,39 +36,6 @@ class ListenerTestCase(base.TestCase):
         self.mock_platform.return_value = "ubuntu"
         self.test_loadbalancer = loadbalancer.Loadbalancer()

-    @mock.patch('os.makedirs')
-    @mock.patch('os.path.exists')
-    @mock.patch('os.listdir')
-    @mock.patch('os.path.join')
-    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
-                'get_loadbalancers')
-    @mock.patch('octavia.amphorae.backends.agent.api_server.util'
-                '.haproxy_sock_path')
-    def test_vrrp_check_script_update(self, mock_sock_path, mock_get_lbs,
-                                      mock_join, mock_listdir, mock_exists,
-                                      mock_makedirs):
-        mock_get_lbs.return_value = ['abc', LB_ID1]
-        mock_sock_path.return_value = 'listener.sock'
-        mock_exists.return_value = False
-        cmd = 'haproxy-vrrp-check ' + ' '.join(['listener.sock']) + '; exit $?'
-
-        path = agent_util.keepalived_dir()
-        m = self.useFixture(test_utils.OpenFixture(path)).mock_open
-
-        self.test_loadbalancer.vrrp_check_script_update(LB_ID1, 'stop')
-        handle = m()
-        handle.write.assert_called_once_with(cmd)
-
-        mock_get_lbs.return_value = ['abc', LB_ID1]
-        cmd = ('haproxy-vrrp-check ' + ' '.join(['listener.sock',
-                                                 'listener.sock']) + '; exit '
-               '$?')
-
-        m = self.useFixture(test_utils.OpenFixture(path)).mock_open
-        self.test_loadbalancer.vrrp_check_script_update(LB_ID1, 'start')
-        handle = m()
-        handle.write.assert_called_once_with(cmd)
-
     @mock.patch('os.path.exists')
     @mock.patch('octavia.amphorae.backends.agent.api_server' +
                 '.util.get_haproxy_pid')
@@ -88,8 +58,8 @@ class ListenerTestCase(base.TestCase):

     @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.'
                 'Loadbalancer._check_haproxy_status')
-    @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.'
-                'Loadbalancer.vrrp_check_script_update')
+    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
+                'vrrp_check_script_update')
     @mock.patch('os.path.exists')
     @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.'
                 'Loadbalancer._check_lb_exists')
@@ -99,6 +69,8 @@ class ListenerTestCase(base.TestCase):
                            mock_check_status):
         listener_id = uuidutils.generate_uuid()

+        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+
         mock_path_exists.side_effect = [False, True, True, False, False]
         mock_check_status.side_effect = ['bogus', consts.OFFLINE]

@@ -121,6 +93,9 @@ class ListenerTestCase(base.TestCase):
         self.assertEqual(ref_details, result.json['details'])

         # Happy path - VRRP - RELOAD
+        conf.config(group="controller_worker",
+                    loadbalancer_topology=consts.TOPOLOGY_ACTIVE_STANDBY)
+
         mock_lb_exists.reset_mock()
         mock_vrrp_update.reset_mock()
         mock_check_output.reset_mock()
@@ -167,6 +142,9 @@ class ListenerTestCase(base.TestCase):
         self.assertEqual(ref_details, result.json['details'])

         # Unhappy path - Not already running
+        conf.config(group="controller_worker",
+                    loadbalancer_topology=consts.TOPOLOGY_SINGLE)
+
         mock_lb_exists.reset_mock()
         mock_vrrp_update.reset_mock()
         mock_check_output.reset_mock()
@@ -30,6 +30,7 @@ BASE_AMP_PATH = '/var/lib/octavia'
 BASE_CRT_PATH = BASE_AMP_PATH + '/certs'
 CONF = cfg.CONF
 LISTENER_ID1 = uuidutils.generate_uuid()
+LB_ID1 = uuidutils.generate_uuid()


 class TestUtil(base.TestCase):
@@ -278,3 +279,130 @@ class TestUtil(base.TestCase):
         self.useFixture(test_utils.OpenFixture(path, fake_cfg))
         self.assertRaises(util.ParsingError, util.parse_haproxy_file,
                           LISTENER_ID1)
+
+    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
+                'get_udp_listeners')
+    @mock.patch('os.makedirs')
+    @mock.patch('os.path.exists')
+    @mock.patch('os.listdir')
+    @mock.patch('os.path.join')
+    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
+                'get_loadbalancers')
+    @mock.patch('octavia.amphorae.backends.agent.api_server.util'
+                '.haproxy_sock_path')
+    def test_vrrp_check_script_update(self, mock_sock_path, mock_get_lbs,
+                                      mock_join, mock_listdir, mock_exists,
+                                      mock_makedirs, mock_get_listeners):
+        mock_get_lbs.return_value = ['abc', LB_ID1]
+        mock_sock_path.return_value = 'listener.sock'
+        mock_exists.side_effect = [False, False, True]
+        mock_get_lbs.side_effect = [['abc', LB_ID1], ['abc', LB_ID1], []]
+        mock_get_listeners.return_value = []
+
+        # Test the stop action path
+        cmd = 'haproxy-vrrp-check ' + ' '.join(['listener.sock']) + '; exit $?'
+        path = util.keepalived_dir()
+        m = self.useFixture(test_utils.OpenFixture(path)).mock_open
+
+        util.vrrp_check_script_update(LB_ID1, 'stop')
+
+        handle = m()
+        handle.write.assert_called_once_with(cmd)
+
+        # Test the start action path
+        cmd = ('haproxy-vrrp-check ' + ' '.join(['listener.sock',
+                                                 'listener.sock']) + '; exit '
+               '$?')
+        m = self.useFixture(test_utils.OpenFixture(path)).mock_open
+        util.vrrp_check_script_update(LB_ID1, 'start')
+        handle = m()
+        handle.write.assert_called_once_with(cmd)
+
+        # Test the path with existing keepalived directory and no LBs
+        mock_makedirs.reset_mock()
+        cmd = 'exit 1'
+        m = self.useFixture(test_utils.OpenFixture(path)).mock_open
+
+        util.vrrp_check_script_update(LB_ID1, 'start')
+
+        handle = m()
+        handle.write.assert_called_once_with(cmd)
+        mock_makedirs.assert_has_calls(
+            [mock.call(util.keepalived_dir(), exist_ok=True),
+             mock.call(util.keepalived_check_scripts_dir(), exist_ok=True)])
+
+    @mock.patch('octavia.amphorae.backends.agent.api_server.util.config_path')
+    def test_get_haproxy_vip_addresses(self, mock_cfg_path):
+        FAKE_PATH = 'fake_path'
+        mock_cfg_path.return_value = FAKE_PATH
+        self.useFixture(
+            test_utils.OpenFixture(FAKE_PATH, 'no match')).mock_open()
+
+        # Test with no matching lines in the config file
+        self.assertEqual([], util.get_haproxy_vip_addresses(LB_ID1))
+        mock_cfg_path.assert_called_once_with(LB_ID1)
+
+        # Test with a matching bind line
+        mock_cfg_path.reset_mock()
+        test_data = 'no match\nbind 203.0.113.43:1\nbogus line'
+        self.useFixture(
+            test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open()
+        expected_result = ['203.0.113.43']
+        self.assertEqual(expected_result,
+                         util.get_haproxy_vip_addresses(LB_ID1))
+        mock_cfg_path.assert_called_once_with(LB_ID1)
+
+        # Test with a matching bind line multiple binds
+        mock_cfg_path.reset_mock()
+        test_data = 'no match\nbind 203.0.113.44:1234, 203.0.113.45:4321'
+        self.useFixture(
+            test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open()
+        expected_result = ['203.0.113.44', '203.0.113.45']
+        self.assertEqual(expected_result,
+                         util.get_haproxy_vip_addresses(LB_ID1))
+        mock_cfg_path.assert_called_once_with(LB_ID1)
+
+        # Test with a bogus bind line
+        mock_cfg_path.reset_mock()
+        test_data = 'no match\nbind\nbogus line'
+        self.useFixture(
+            test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open()
+        self.assertEqual([], util.get_haproxy_vip_addresses(LB_ID1))
+        mock_cfg_path.assert_called_once_with(LB_ID1)
+
+    @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.'
+                'send_ip_advertisement')
+    @mock.patch('octavia.amphorae.backends.utils.network_utils.'
+                'get_interface_name')
+    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
+                'get_haproxy_vip_addresses')
+    def test_send_vip_advertisements(self, mock_get_vip_addrs,
+                                     mock_get_int_name, mock_send_advert):
+        mock_get_vip_addrs.side_effect = [[], ['203.0.113.46'],
+                                          Exception('boom')]
+        mock_get_int_name.return_value = 'fake0'
+
+        # Test no VIPs
+        util.send_vip_advertisements(LB_ID1)
+        mock_get_vip_addrs.assert_called_once_with(LB_ID1)
+        mock_get_int_name.assert_not_called()
+        mock_send_advert.assert_not_called()
+
+        # Test with a VIP
+        mock_get_vip_addrs.reset_mock()
+        mock_get_int_name.reset_mock()
+        mock_send_advert.reset_mock()
+        util.send_vip_advertisements(LB_ID1)
+        mock_get_vip_addrs.assert_called_once_with(LB_ID1)
+        mock_get_int_name.assert_called_once_with(
+            '203.0.113.46', net_ns=consts.AMPHORA_NAMESPACE)
+        mock_send_advert.assert_called_once_with(
+            'fake0', '203.0.113.46', net_ns=consts.AMPHORA_NAMESPACE)
+
+        # Test with an exception (should not raise)
+        mock_get_vip_addrs.reset_mock()
+        mock_get_int_name.reset_mock()
+        mock_send_advert.reset_mock()
+        util.send_vip_advertisements(LB_ID1)
+        mock_get_int_name.assert_not_called()
+        mock_send_advert.assert_not_called()
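For orientation, a sketch of the bind-line parsing that test_get_haproxy_vip_addresses pins down; the helper below is illustrative, not the actual Octavia implementation:

def haproxy_vip_addresses(config_text):
    """Collect the bare IPs from every 'bind <ip>:<port>[, ...]' line."""
    addresses = []
    for line in config_text.splitlines():
        if not line.startswith('bind '):
            continue  # A bare 'bind' with no endpoints is skipped too.
        for endpoint in line[len('bind '):].split(','):
            ip, sep, _port = endpoint.strip().rpartition(':')
            if sep:  # Malformed endpoints without a port are dropped.
                addresses.append(ip)
    return addresses

assert haproxy_vip_addresses(
    'no match\nbind 203.0.113.44:1234, 203.0.113.45:4321') == [
        '203.0.113.44', '203.0.113.45']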
@@ -0,0 +1,212 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from binascii import a2b_hex
+import socket
+from struct import pack
+from unittest import mock
+
+from octavia.amphorae.backends.utils import ip_advertisement
+from octavia.common import constants
+import octavia.tests.unit.base as base
+
+
+class TestIPAdvertisement(base.TestCase):
+
+    def setUp(self):
+        super(TestIPAdvertisement, self).setUp()
+
+    @mock.patch('octavia.amphorae.backends.utils.network_namespace.'
+                'NetworkNamespace')
+    @mock.patch('socket.AF_PACKET', create=True)
+    @mock.patch('socket.socket')
+    def test_garp(self, mock_socket, mock_socket_packet, mock_netns):
+        ARP_ETHERTYPE = 0x0806
+        EXPECTED_PACKET_DATA = (b'\xff\xff\xff\xff\xff\xff\x00\x00^\x00S3\x08'
+                                b'\x06\x00\x01\x08\x00\x06\x04\x00\x01\x00'
+                                b'\x00^\x00S3\xcb\x00q\x02\xff\xff\xff\xff'
+                                b'\xff\xff\xcb\x00q\x02')
+        FAKE_INTERFACE = 'fake0'
+        FAKE_MAC = '00005E005333'
+        FAKE_NETNS = 'fake_netns'
+
+        mock_garp_socket = mock.MagicMock()
+        mock_garp_socket.getsockname.return_value = [None, None, None, None,
+                                                     a2b_hex(FAKE_MAC)]
+        mock_socket.return_value = mock_garp_socket
+
+        # Test with a network namespace
+        ip_advertisement.garp(FAKE_INTERFACE, '203.0.113.2', net_ns=FAKE_NETNS)
+
+        mock_netns.assert_called_once_with(FAKE_NETNS)
+        mock_garp_socket.bind.assert_called_once_with((FAKE_INTERFACE,
+                                                       ARP_ETHERTYPE))
+        mock_garp_socket.getsockname.assert_called_once_with()
+        mock_garp_socket.send.assert_called_once_with(EXPECTED_PACKET_DATA)
+        mock_garp_socket.close.assert_called_once_with()
+
+        # Test without a network namespace
+        mock_netns.reset_mock()
+        mock_garp_socket.reset_mock()
+        ip_advertisement.garp(FAKE_INTERFACE, '203.0.113.2')
+
+        mock_netns.assert_not_called()
+        mock_garp_socket.bind.assert_called_once_with((FAKE_INTERFACE,
+                                                       ARP_ETHERTYPE))
+        mock_garp_socket.getsockname.assert_called_once_with()
+        mock_garp_socket.send.assert_called_once_with(EXPECTED_PACKET_DATA)
+        mock_garp_socket.close.assert_called_once_with()
+
+    def test_calculate_icmpv6_checksum(self):
+        TEST_PACKET1 = (
+            b'\x01\r\xb8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003\xff\x02'
+            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00'
+            b'\x00\x00:\x00 \x88\x00\x00\x00 \x01\r\xb8\x00\x00\x00\x00\x00'
+            b'\x00\x00\x00\x00\x00\x003\xff\x02\x00\x00\x00\x00\x00\x00\x00'
+            b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00:\x00')
+        TEST_PACKET2 = (
+            b'\x01\r\xb8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003\xff\x02'
+            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00'
+            b'\x00\x00:\x00 \x88\x00\x00\x00 \x01\r\xb8\x00\x00\x00\x00\x00'
+            b'\x00\x00\x00\x00\x00\x003\xff\x02\x00\x00\x00\x00\x00\x00\x00'
+            b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00:\x00\x01')
+
+        self.assertEqual(
+            35645, ip_advertisement.calculate_icmpv6_checksum(TEST_PACKET1))
+        self.assertEqual(
+            35389, ip_advertisement.calculate_icmpv6_checksum(TEST_PACKET2))
+
+    @mock.patch('fcntl.ioctl')
+    @mock.patch('octavia.amphorae.backends.utils.network_namespace.'
+                'NetworkNamespace')
+    @mock.patch('socket.socket')
+    def test_neighbor_advertisement(self, mock_socket, mock_netns, mock_ioctl):
+        ALL_NODES_ADDR = 'ff02::1'
+        EXPECTED_PACKET_DATA = (b'\x88\x00\x1dk\xa0\x00\x00\x00 \x01\r\xb8\x00'
+                                b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003'
+                                b'\x02\x01')
+        FAKE_INTERFACE = 'fake0'
+        FAKE_MAC = '00005E005333'
+        FAKE_NETNS = 'fake_netns'
+        ICMPV6_PROTO = socket.getprotobyname(constants.IPV6_ICMP)
+        SIOCGIFHWADDR = 0x8927
+        SOURCE_IP = '2001:db8::33'
+
+        mock_na_socket = mock.MagicMock()
+        mock_socket.return_value = mock_na_socket
+        mock_ioctl.return_value = a2b_hex(FAKE_MAC)
+
+        # Test with a network namespace
+        ip_advertisement.neighbor_advertisement(FAKE_INTERFACE, SOURCE_IP,
+                                                net_ns=FAKE_NETNS)
+
+        mock_netns.assert_called_once_with(FAKE_NETNS)
+        mock_socket.assert_called_once_with(socket.AF_INET6, socket.SOCK_RAW,
+                                            ICMPV6_PROTO)
+        mock_na_socket.setsockopt.assert_called_once_with(
+            socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255)
+        mock_na_socket.bind.assert_called_once_with((SOURCE_IP, 0))
+        mock_ioctl.assert_called_once_with(
+            mock_na_socket.fileno(), SIOCGIFHWADDR,
+            pack('256s', bytes(FAKE_INTERFACE, 'utf-8')))
+        mock_na_socket.sendto.assert_called_once_with(
+            EXPECTED_PACKET_DATA, (ALL_NODES_ADDR, 0, 0, 0))
+        mock_na_socket.close.assert_called_once_with()
+
+        # Test without a network namespace
+        mock_na_socket.reset_mock()
+        mock_netns.reset_mock()
+        mock_ioctl.reset_mock()
+        mock_socket.reset_mock()
+
+        ip_advertisement.neighbor_advertisement(FAKE_INTERFACE, SOURCE_IP)
+
+        mock_netns.assert_not_called()
+        mock_socket.assert_called_once_with(socket.AF_INET6, socket.SOCK_RAW,
+                                            ICMPV6_PROTO)
+        mock_na_socket.setsockopt.assert_called_once_with(
+            socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255)
+        mock_na_socket.bind.assert_called_once_with((SOURCE_IP, 0))
+        mock_ioctl.assert_called_once_with(
+            mock_na_socket.fileno(), SIOCGIFHWADDR,
+            pack('256s', bytes(FAKE_INTERFACE, 'utf-8')))
+        mock_na_socket.sendto.assert_called_once_with(
+            EXPECTED_PACKET_DATA, (ALL_NODES_ADDR, 0, 0, 0))
+        mock_na_socket.close.assert_called_once_with()
+
+    @mock.patch('octavia.common.utils.is_ipv6')
+    @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.garp')
+    @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.'
+                'neighbor_advertisement')
+    def test_send_ip_advertisement(self, mock_na, mock_garp, mock_is_ipv6):
+        FAKE_INTERFACE = 'fake0'
+        FAKE_NETNS = 'fake_netns'
+        IPV4_ADDRESS = '203.0.113.9'
+        IPV6_ADDRESS = '2001:db8::33'
+
+        mock_is_ipv6.side_effect = [mock.DEFAULT, mock.DEFAULT, False]
+
+        # Test IPv4 advertisement
+        ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV4_ADDRESS)
+
+        mock_garp.assert_called_once_with(FAKE_INTERFACE, IPV4_ADDRESS, None)
+        mock_na.assert_not_called()
+
+        # Test IPv4 advertisement with a network namespace
+        mock_garp.reset_mock()
+        mock_na.reset_mock()
+
+        ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV4_ADDRESS,
+                                               net_ns=FAKE_NETNS)
+
+        mock_garp.assert_called_once_with(FAKE_INTERFACE, IPV4_ADDRESS,
+                                          FAKE_NETNS)
+        mock_na.assert_not_called()
+
+        # Test IPv6 advertisement
+        mock_garp.reset_mock()
+        mock_na.reset_mock()
+
+        ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV6_ADDRESS)
+
+        mock_garp.assert_not_called()
+        mock_na.assert_called_once_with(FAKE_INTERFACE, IPV6_ADDRESS, None)
+
+        # Test IPv6 advertisement with a network namespace
+        mock_garp.reset_mock()
+        mock_na.reset_mock()
+
+        ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV6_ADDRESS,
+                                               net_ns=FAKE_NETNS)
+
+        mock_garp.assert_not_called()
+        mock_na.assert_called_once_with(FAKE_INTERFACE, IPV6_ADDRESS,
+                                        FAKE_NETNS)
+
+        # Test bogus IP
+        mock_garp.reset_mock()
+        mock_na.reset_mock()
+
+        ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, 'not an IP')
+
+        mock_garp.assert_not_called()
+        mock_na.assert_not_called()
+
+        # Test unknown IP version
+        mock_garp.reset_mock()
+        mock_na.reset_mock()
+
+        ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV6_ADDRESS)
+
+        mock_garp.assert_not_called()
+        mock_na.assert_not_called()
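test_calculate_icmpv6_checksum above fixes the function to two known vectors (35645 and 35389). For readers unfamiliar with the math, a generic RFC 1071-style one's-complement checksum looks like this; a sketch, not necessarily the exact Octavia implementation:

def icmpv6_checksum(data):
    """Sum big-endian 16-bit words, fold carries, return the complement."""
    total = 0
    for i in range(0, len(data) - 1, 2):
        total += (data[i] << 8) + data[i + 1]
    if len(data) % 2:
        total += data[-1] << 8  # Odd length: pad the final byte with zero.
    while total >> 16:
        total = (total & 0xffff) + (total >> 16)  # Fold carry bits back in.
    return ~total & 0xffff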
@@ -0,0 +1,116 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import random
+from unittest import mock
+
+from octavia.amphorae.backends.utils import network_namespace
+from octavia.tests.common import utils as test_utils
+import octavia.tests.unit.base as base
+
+
+class TestNetworkNamespace(base.TestCase):
+
+    def setUp(self):
+        super(TestNetworkNamespace, self).setUp()
+
+    @mock.patch('ctypes.get_errno')
+    @mock.patch('ctypes.CDLL')
+    def test_error_handler(self, mock_cdll, mock_get_errno):
+        FAKE_NETNS = 'fake-netns'
+        netns = network_namespace.NetworkNamespace(FAKE_NETNS)
+
+        # Test result 0
+        netns._error_handler(0, None, None)
+
+        mock_get_errno.assert_not_called()
+
+        # Test result -1
+        mock_get_errno.reset_mock()
+
+        self.assertRaises(OSError, netns._error_handler, -1, None, None)
+
+        mock_get_errno.assert_called_once_with()
+
+    @mock.patch('os.getpid')
+    @mock.patch('ctypes.CDLL')
+    def test_init(self, mock_cdll, mock_getpid):
+        FAKE_NETNS = 'fake-netns'
+        FAKE_PID = random.randrange(100000)
+        mock_cdll_obj = mock.MagicMock()
+        mock_cdll.return_value = mock_cdll_obj
+        mock_getpid.return_value = FAKE_PID
+        expected_current_netns = '/proc/{pid}/ns/net'.format(pid=FAKE_PID)
+        expected_target_netns = '/var/run/netns/{netns}'.format(
+            netns=FAKE_NETNS)
+
+        netns = network_namespace.NetworkNamespace(FAKE_NETNS)
+
+        self.assertEqual(expected_current_netns, netns.current_netns)
+        self.assertEqual(expected_target_netns, netns.target_netns)
+        self.assertEqual(mock_cdll_obj.setns, netns.set_netns)
+        self.assertEqual(netns.set_netns.errcheck, netns._error_handler)
+
+    @mock.patch('os.getpid')
+    @mock.patch('ctypes.CDLL')
+    def test_enter(self, mock_cdll, mock_getpid):
+        CLONE_NEWNET = 0x40000000
+        FAKE_NETNS = 'fake-netns'
+        FAKE_PID = random.randrange(100000)
+        current_netns_fd = random.randrange(100000)
+        target_netns_fd = random.randrange(100000)
+        mock_getpid.return_value = FAKE_PID
+        mock_cdll_obj = mock.MagicMock()
+        mock_cdll.return_value = mock_cdll_obj
+        expected_current_netns = '/proc/{pid}/ns/net'.format(pid=FAKE_PID)
+        expected_target_netns = '/var/run/netns/{netns}'.format(
+            netns=FAKE_NETNS)
+
+        netns = network_namespace.NetworkNamespace(FAKE_NETNS)
+
+        current_mock_open = self.useFixture(
+            test_utils.OpenFixture(expected_current_netns)).mock_open
+        current_mock_open.return_value = current_netns_fd
+
+        target_mock_open = self.useFixture(
+            test_utils.OpenFixture(expected_target_netns)).mock_open
+        handle = target_mock_open()
+        handle.fileno.return_value = target_netns_fd
+
+        netns.__enter__()
+
+        self.assertEqual(current_netns_fd, netns.current_netns_fd)
+        netns.set_netns.assert_called_once_with(target_netns_fd, CLONE_NEWNET)
+
+    @mock.patch('os.getpid')
+    @mock.patch('ctypes.CDLL')
+    def test_exit(self, mock_cdll, mock_getpid):
+        CLONE_NEWNET = 0x40000000
+        FAKE_NETNS = 'fake-netns'
+        FAKE_PID = random.randrange(100000)
+        current_netns_fileno = random.randrange(100000)
+        mock_getpid.return_value = FAKE_PID
+        mock_cdll_obj = mock.MagicMock()
+        mock_cdll.return_value = mock_cdll_obj
+        mock_current_netns_fd = mock.MagicMock()
+        mock_current_netns_fd.fileno.return_value = current_netns_fileno
+
+        netns = network_namespace.NetworkNamespace(FAKE_NETNS)
+
+        netns.current_netns_fd = mock_current_netns_fd
+
+        netns.__exit__()
+
+        netns.set_netns.assert_called_once_with(current_netns_fileno,
+                                                CLONE_NEWNET)
+        mock_current_netns_fd.close.assert_called_once_with()
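The tests above describe a context manager that hops into a named namespace with setns(2) via ctypes and restores the original on exit. A minimal sketch reconstructed from those assertions (the libc name passed to CDLL is an assumption; the tests mock it out):

import ctypes
import os

CLONE_NEWNET = 0x40000000


class NetNS(object):
    def __init__(self, netns):
        self.current_netns = '/proc/{pid}/ns/net'.format(pid=os.getpid())
        self.target_netns = '/var/run/netns/{netns}'.format(netns=netns)
        # 'libc.so.6' is assumed here; the tests patch ctypes.CDLL entirely.
        self.set_netns = ctypes.CDLL('libc.so.6', use_errno=True).setns

    def __enter__(self):
        # Hold the current namespace open so __exit__ can switch back.
        self.current_netns_fd = open(self.current_netns)
        with open(self.target_netns) as fd:
            self.set_netns(fd.fileno(), CLONE_NEWNET)

    def __exit__(self, *args):
        self.set_netns(self.current_netns_fd.fileno(), CLONE_NEWNET)
        self.current_netns_fd.close()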
octavia/tests/unit/amphorae/backends/utils/test_network_utils.py (new file, 140 lines)
@@ -0,0 +1,140 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from unittest import mock
+
+from octavia.amphorae.backends.utils import network_utils
+from octavia.common import exceptions
+from octavia.tests.common import sample_network_data
+import octavia.tests.unit.base as base
+
+
+class TestNetworkUtils(base.TestCase):
+
+    def setUp(self):
+        super(TestNetworkUtils, self).setUp()
+
+    def test_find_interface(self):
+        FAKE_INTERFACE = 'fake0'
+        IPV4_ADDRESS = '203.0.113.55'
+        BROADCAST_ADDRESS = '203.0.113.55'
+        IPV6_ADDRESS = '2001:db8::55'
+        SAMPLE_IPV4_ADDR = sample_network_data.create_iproute_ipv4_address(
+            IPV4_ADDRESS, BROADCAST_ADDRESS, FAKE_INTERFACE)
+        SAMPLE_IPV6_ADDR = sample_network_data.create_iproute_ipv6_address(
+            IPV6_ADDRESS, FAKE_INTERFACE)
+        SAMPLE_INTERFACE = sample_network_data.create_iproute_interface(
+            FAKE_INTERFACE)
+        BROKEN_INTERFACE = [{'attrs': []}]
+
+        mock_ip_addr = mock.MagicMock()
+        mock_rtnl_api = mock.MagicMock()
+        mock_rtnl_api.get_addr.side_effect = [[], SAMPLE_IPV4_ADDR,
+                                              SAMPLE_IPV6_ADDR,
+                                              SAMPLE_IPV6_ADDR]
+        mock_rtnl_api.get_links.side_effect = [SAMPLE_INTERFACE,
+                                               SAMPLE_INTERFACE,
+                                               BROKEN_INTERFACE]
+
+        # Test no match
+        IPV4_ADDRESS = '203.0.113.55'
+        mock_ip_addr.version = 4
+
+        self.assertIsNone(network_utils._find_interface(IPV4_ADDRESS,
+                                                        mock_rtnl_api,
+                                                        IPV4_ADDRESS))
+
+        # Test with IPv4 address
+        mock_rtnl_api.reset_mock()
+        mock_ip_addr.version = 4
+
+        result = network_utils._find_interface(IPV4_ADDRESS, mock_rtnl_api,
+                                               IPV4_ADDRESS)
+
+        self.assertEqual(FAKE_INTERFACE, result)
+        mock_rtnl_api.get_addr.assert_called_once_with(address=IPV4_ADDRESS)
+        mock_rtnl_api.get_links.assert_called_once_with(2)
+
+        # Test with IPv6 address
+        mock_rtnl_api.reset_mock()
+        mock_ip_addr.version = 6
+
+        result = network_utils._find_interface(IPV6_ADDRESS, mock_rtnl_api,
+                                               IPV6_ADDRESS)
+
+        self.assertEqual(FAKE_INTERFACE, result)
+        mock_rtnl_api.get_addr.assert_called_once_with(address=IPV6_ADDRESS)
+        mock_rtnl_api.get_links.assert_called_once_with(2)
+
+        # Test with a broken interface
+        mock_rtnl_api.reset_mock()
+        mock_ip_addr.version = 6
+
+        self.assertIsNone(network_utils._find_interface(IPV6_ADDRESS,
+                                                        mock_rtnl_api,
+                                                        IPV6_ADDRESS))
+        mock_rtnl_api.get_addr.assert_called_once_with(address=IPV6_ADDRESS)
+        mock_rtnl_api.get_links.assert_called_once_with(2)
+
+    @mock.patch('octavia.amphorae.backends.utils.network_utils.'
+                '_find_interface')
+    @mock.patch('pyroute2.IPRoute', create=True)
+    @mock.patch('pyroute2.NetNS', create=True)
+    def test_get_interface_name(self, mock_netns, mock_ipr, mock_find_int):
+        FAKE_INTERFACE = 'fake0'
+        FAKE_NETNS = 'fake-ns'
+        IPV4_ADDRESS = '203.0.113.64'
+
+        mock_ipr_enter_obj = mock.MagicMock()
+        mock_ipr_obj = mock.MagicMock()
+        mock_ipr_obj.__enter__.return_value = mock_ipr_enter_obj
+        mock_ipr.return_value = mock_ipr_obj
+
+        mock_netns_enter_obj = mock.MagicMock()
+        mock_netns_obj = mock.MagicMock()
+        mock_netns_obj.__enter__.return_value = mock_netns_enter_obj
+        mock_netns.return_value = mock_netns_obj
+
+        mock_find_int.side_effect = [FAKE_INTERFACE, FAKE_INTERFACE, None]
+
+        # Test a bogus IP address
+        self.assertRaises(exceptions.InvalidIPAddress,
+                          network_utils.get_interface_name, 'not an IP', None)
+
+        # Test with no network namespace
+        result = network_utils.get_interface_name(IPV4_ADDRESS)
+
+        self.assertEqual(FAKE_INTERFACE, result)
+        mock_ipr.assert_called_once_with()
+        mock_find_int.assert_called_once_with(IPV4_ADDRESS, mock_ipr_enter_obj,
+                                              IPV4_ADDRESS)
+
+        # Test with network namespace
+        mock_ipr.reset_mock()
+        mock_find_int.reset_mock()
+
+        result = network_utils.get_interface_name(IPV4_ADDRESS,
+                                                  net_ns=FAKE_NETNS)
+        self.assertEqual(FAKE_INTERFACE, result)
+        mock_netns.assert_called_once_with(FAKE_NETNS)
+        mock_find_int.assert_called_once_with(IPV4_ADDRESS,
+                                              mock_netns_enter_obj,
+                                              IPV4_ADDRESS)
+
+        # Test no interface found
+        mock_ipr.reset_mock()
+        mock_find_int.reset_mock()
+
+        self.assertRaises(
+            exceptions.NotFound, network_utils.get_interface_name,
+            IPV4_ADDRESS, net_ns=FAKE_NETNS)
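A sketch of the lookup pattern test_find_interface exercises, reconstructed from the mocked rtnetlink calls above (get_addr/get_links and the IFLA_IFNAME attribute); details of the real implementation may differ:

def find_interface(rtnl_api, address):
    """Resolve an IP address to its interface name, or None."""
    for addr in rtnl_api.get_addr(address=address):
        # Map the address record's interface index to the owning link.
        for link in rtnl_api.get_links(addr['index']):
            attrs = dict(link['attrs'])
            if 'IFLA_IFNAME' in attrs:
                return attrs['IFLA_IFNAME']
        return None  # The link record carried no name attribute.
    return None  # No address record matched at all.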
@@ -0,0 +1,52 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from unittest import mock
+
+from octavia.amphorae.drivers.haproxy import exceptions
+import octavia.tests.unit.base as base
+
+
+class TestHAProxyExceptions(base.TestCase):
+
+    def setUp(self):
+        super(TestHAProxyExceptions, self).setUp()
+
+    @mock.patch('octavia.amphorae.drivers.haproxy.exceptions.LOG')
+    def test_check_exception(self, mock_logger):
+
+        response_mock = mock.MagicMock()
+
+        # Test exception that should raise and log
+        response_mock.status_code = 404
+
+        self.assertRaises(exceptions.NotFound, exceptions.check_exception,
+                          response_mock)
+        mock_logger.error.assert_called_once()
+
+        # Test exception that should raise but not log
+        mock_logger.reset_mock()
+        response_mock.status_code = 403
+
+        self.assertRaises(exceptions.Forbidden, exceptions.check_exception,
+                          response_mock, log_error=False)
+        mock_logger.error.assert_not_called()
+
+        # Test exception that should be ignored
+        mock_logger.reset_mock()
+        response_mock.status_code = 401
+
+        result = exceptions.check_exception(response_mock, ignore=[401])
+
+        mock_logger.error.assert_not_called()
+        self.assertEqual(response_mock, result)
@ -460,7 +460,30 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
|
|||||||
self.driver.start(loadbalancer)
|
self.driver.start(loadbalancer)
|
||||||
self.driver.clients[
|
self.driver.clients[
|
||||||
API_VERSION].start_listener.assert_called_once_with(
|
API_VERSION].start_listener.assert_called_once_with(
|
||||||
amp1, listener.id)
|
amp1, listener.id, None)
|
||||||
|
|
||||||
|
def test_reload(self):
|
||||||
|
amp1 = mock.MagicMock()
|
||||||
|
amp1.api_version = API_VERSION
|
||||||
|
amp2 = mock.MagicMock()
|
||||||
|
amp2.api_version = API_VERSION
|
||||||
|
amp2.status = constants.DELETED
|
||||||
|
loadbalancer = mock.MagicMock()
|
||||||
|
loadbalancer.id = uuidutils.generate_uuid()
|
||||||
|
loadbalancer.amphorae = [amp1, amp2]
|
||||||
|
loadbalancer.vip = self.sv
|
||||||
|
listener = mock.MagicMock()
|
||||||
|
listener.id = uuidutils.generate_uuid()
|
||||||
|
listener.protocol = constants.PROTOCOL_HTTP
|
||||||
|
loadbalancer.listeners = [listener]
|
||||||
|
listener.load_balancer = loadbalancer
|
||||||
|
self.driver.clients[
|
||||||
|
API_VERSION].reload_listener.__name__ = 'reload_listener'
|
||||||
|
# Execute driver method
|
||||||
|
self.driver.reload(loadbalancer)
|
||||||
|
self.driver.clients[
|
||||||
|
API_VERSION].reload_listener.assert_called_once_with(
|
||||||
|
amp1, listener.id, None)
|
||||||
|
|
||||||
def test_start_with_amphora(self):
|
def test_start_with_amphora(self):
|
||||||
# Execute driver method
|
# Execute driver method
|
||||||
@ -470,7 +493,7 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
|
|||||||
self.driver.start(self.lb, self.amp)
|
self.driver.start(self.lb, self.amp)
|
||||||
self.driver.clients[
|
self.driver.clients[
|
||||||
API_VERSION].start_listener.assert_called_once_with(
|
API_VERSION].start_listener.assert_called_once_with(
|
||||||
self.amp, self.sl.id)
|
self.amp, self.sl.id, None)
|
||||||
|
|
||||||
self.driver.clients[API_VERSION].start_listener.reset_mock()
|
self.driver.clients[API_VERSION].start_listener.reset_mock()
|
||||||
amp.status = constants.DELETED
|
amp.status = constants.DELETED
|
||||||
@ -484,7 +507,7 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
|
|||||||
self.driver.start(self.lb_udp)
|
self.driver.start(self.lb_udp)
|
||||||
self.driver.clients[
|
self.driver.clients[
|
||||||
API_VERSION].start_listener.assert_called_once_with(
|
API_VERSION].start_listener.assert_called_once_with(
|
||||||
self.amp, self.sl_udp.id)
|
self.amp, self.sl_udp.id, None)
|
||||||
|
|
||||||
@mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.'
|
@mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.'
|
||||||
'HaproxyAmphoraLoadBalancerDriver._process_secret')
|
'HaproxyAmphoraLoadBalancerDriver._process_secret')
|
||||||
@ -627,11 +650,6 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
|
|||||||
fixed_ips=expected_fixed_ips,
|
fixed_ips=expected_fixed_ips,
|
||||||
mtu=FAKE_MTU))
|
mtu=FAKE_MTU))
|
||||||
|
|
||||||
def test_get_vrrp_interface(self):
|
|
||||||
self.driver.get_vrrp_interface(self.amp)
|
|
||||||
self.driver.clients[API_VERSION].get_interface.assert_called_once_with(
|
|
||||||
self.amp, self.amp.vrrp_ip, timeout_dict=None)
|
|
||||||
|
|
||||||
def test_get_haproxy_versions(self):
|
def test_get_haproxy_versions(self):
|
||||||
ref_haproxy_versions = ['1', '6']
|
ref_haproxy_versions = ['1', '6']
|
||||||
result = self.driver._get_haproxy_versions(self.amp)
|
result = self.driver._get_haproxy_versions(self.amp)
|
||||||
|
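
Note: the extra `None` threaded through these start/reload assertions is a new optional per-call timeout argument on the amphora REST client. A minimal sketch of the fail-fast timeout dictionary such calls can carry, assuming the constant names used by the driver-task tests later in this patch (the client signature itself is inferred from the updated assertions):

    # Sketch only: constants mirror those in the amphora driver task tests.
    from octavia.common import constants

    fail_fast_timeouts = {
        constants.REQ_CONN_TIMEOUT: 1,     # seconds to establish a connection
        constants.REQ_READ_TIMEOUT: 2,     # seconds to wait for a response
        constants.CONN_MAX_RETRIES: 3,     # retry attempts before giving up
        constants.CONN_RETRY_INTERVAL: 4,  # seconds between retry attempts
    }

    # driver.start(loadbalancer) ultimately calls, per amphora:
    #     client.start_listener(amphora, listener.id, None)
    # and a failover path could pass fail_fast_timeouts instead of None.
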
@@ -461,7 +461,30 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
         self.driver.start(loadbalancer)
         self.driver.clients[
             API_VERSION].start_listener.assert_called_once_with(
-                amp1, loadbalancer.id)
+                amp1, loadbalancer.id, None)
 
+    def test_reload(self):
+        amp1 = mock.MagicMock()
+        amp1.api_version = API_VERSION
+        amp2 = mock.MagicMock()
+        amp2.api_version = API_VERSION
+        amp2.status = constants.DELETED
+        loadbalancer = mock.MagicMock()
+        loadbalancer.id = uuidutils.generate_uuid()
+        loadbalancer.amphorae = [amp1, amp2]
+        loadbalancer.vip = self.sv
+        listener = mock.MagicMock()
+        listener.id = uuidutils.generate_uuid()
+        listener.protocol = constants.PROTOCOL_HTTP
+        loadbalancer.listeners = [listener]
+        listener.load_balancer = loadbalancer
+        self.driver.clients[
+            API_VERSION].reload_listener.__name__ = 'reload_listener'
+        # Execute driver method
+        self.driver.reload(loadbalancer)
+        self.driver.clients[
+            API_VERSION].reload_listener.assert_called_once_with(
+                amp1, loadbalancer.id, None)
+
     def test_start_with_amphora(self):
         # Execute driver method
@@ -471,7 +494,7 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
         self.driver.start(self.lb, self.amp)
         self.driver.clients[
             API_VERSION].start_listener.assert_called_once_with(
-                self.amp, self.lb.id)
+                self.amp, self.lb.id, None)
 
         self.driver.clients[API_VERSION].start_listener.reset_mock()
         amp.status = constants.DELETED
@@ -485,7 +508,7 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
         self.driver.start(self.lb_udp)
         self.driver.clients[
             API_VERSION].start_listener.assert_called_once_with(
-                self.amp, self.sl_udp.id)
+                self.amp, self.sl_udp.id, None)
 
     @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.'
                 'HaproxyAmphoraLoadBalancerDriver._process_secret')
@@ -721,11 +744,6 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
                 fixed_ips=expected_fixed_ips,
                 mtu=FAKE_MTU))
 
-    def test_get_vrrp_interface(self):
-        self.driver.get_vrrp_interface(self.amp)
-        self.driver.clients[API_VERSION].get_interface.assert_called_once_with(
-            self.amp, self.amp.vrrp_ip, timeout_dict=None)
-
     def test_get_haproxy_versions(self):
         ref_haproxy_versions = ['1', '6']
         result = self.driver._get_haproxy_versions(self.amp)
@@ -0,0 +1,83 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from unittest import mock
+
+from octavia.amphorae.drivers.haproxy import exceptions as exc
+from octavia.amphorae.drivers.haproxy import rest_api_driver
+import octavia.tests.unit.base as base
+
+
+class TestHAProxyAmphoraDriver(base.TestCase):
+
+    def setUp(self):
+        super(TestHAProxyAmphoraDriver, self).setUp()
+        self.driver = rest_api_driver.HaproxyAmphoraLoadBalancerDriver()
+
+    @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.'
+                'HaproxyAmphoraLoadBalancerDriver.'
+                '_populate_amphora_api_version')
+    def test_get_interface_from_ip(self, mock_api_version):
+        FAKE_INTERFACE = 'fake0'
+        IP_ADDRESS = '203.0.113.42'
+        TIMEOUT_DICT = {'outa': 'time'}
+        amphora_mock = mock.MagicMock()
+        amphora_mock.api_version = '0'
+        client_mock = mock.MagicMock()
+        client_mock.get_interface.side_effect = [
+            {'interface': FAKE_INTERFACE}, {'interface': FAKE_INTERFACE},
+            {}, exc.NotFound]
+        self.driver.clients['0'] = client_mock
+
+        # Test interface found no timeout
+
+        result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS)
+
+        self.assertEqual(FAKE_INTERFACE, result)
+        mock_api_version.assert_called_once_with(amphora_mock, None)
+        client_mock.get_interface.assert_called_once_with(
+            amphora_mock, IP_ADDRESS, None, log_error=False)
+
+        # Test interface found with timeout
+        mock_api_version.reset_mock()
+        client_mock.reset_mock()
+
+        result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS,
+                                                   timeout_dict=TIMEOUT_DICT)
+
+        self.assertEqual(FAKE_INTERFACE, result)
+        mock_api_version.assert_called_once_with(amphora_mock, TIMEOUT_DICT)
+        client_mock.get_interface.assert_called_once_with(
+            amphora_mock, IP_ADDRESS, TIMEOUT_DICT, log_error=False)
+
+        # Test no interface data
+        mock_api_version.reset_mock()
+        client_mock.reset_mock()
+
+        result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS)
+
+        self.assertIsNone(result)
+        mock_api_version.assert_called_once_with(amphora_mock, None)
+        client_mock.get_interface.assert_called_once_with(
+            amphora_mock, IP_ADDRESS, None, log_error=False)
+
+        # Test NotFound
+        mock_api_version.reset_mock()
+        client_mock.reset_mock()
+
+        result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS)
+
+        self.assertIsNone(result)
+        mock_api_version.assert_called_once_with(amphora_mock, None)
+        client_mock.get_interface.assert_called_once_with(
+            amphora_mock, IP_ADDRESS, None, log_error=False)
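
The new `TestHAProxyAmphoraDriver` case above pins down the contract of `get_interface_from_ip`: it returns the interface name when the amphora reports one, and `None` both when the response carries no interface data and when the agent answers with `NotFound`. A rough sketch of that contract, assuming a client shaped like the mock in the test:

    # Sketch under assumptions: 'client' matches the mocked API above
    # (get_interface(amphora, ip, timeout_dict, log_error=False) returning
    # a dict like {'interface': 'fake0'}, raising NotFound on 404).
    def get_interface_from_ip(client, amphora, ip_address, timeout_dict=None):
        try:
            response = client.get_interface(amphora, ip_address, timeout_dict,
                                            log_error=False)
            if response and response.get('interface'):
                return response.get('interface')
            return None  # Response carried no interface data.
        except Exception:
            return None  # The test expects NotFound to map to None.
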
@@ -53,13 +53,42 @@ class TestVRRPRestDriver(base.TestCase):
 
         mock_templater.return_value = self.FAKE_CONFIG
 
-        self.keepalived_mixin.update_vrrp_conf(self.lb_mock,
-                                               self.amphorae_network_config)
+        self.keepalived_mixin.update_vrrp_conf(
+            self.lb_mock, self.amphorae_network_config, self.amphora_mock)
 
         self.clients[API_VERSION].upload_vrrp_config.assert_called_once_with(
             self.amphora_mock,
             self.FAKE_CONFIG)
 
+        # Test with amphorav2 amphorae_network_config list of dicts
+        mock_templater.reset_mock()
+        self.clients[API_VERSION].upload_vrrp_config.reset_mock()
+        v2_amphorae_network_config = {}
+        vip_subnet_dict = {
+            constants.VIP_SUBNET: {constants.CIDR: '192.0.2.0/24'}}
+        v2_amphorae_network_config[self.amphora_mock.id] = vip_subnet_dict
+
+        self.keepalived_mixin.update_vrrp_conf(
+            self.lb_mock, v2_amphorae_network_config, self.amphora_mock)
+
+        self.clients[API_VERSION].upload_vrrp_config.assert_called_once_with(
+            self.amphora_mock,
+            self.FAKE_CONFIG)
+
+        # Test amphora not in AMPHORA_ALLOCATED state
+        mock_templater.reset_mock()
+        self.clients[API_VERSION].upload_vrrp_config.reset_mock()
+        ready_amphora_mock = mock.MagicMock()
+        ready_amphora_mock.id = uuidutils.generate_uuid()
+        ready_amphora_mock.status = constants.AMPHORA_READY
+        ready_amphora_mock.api_version = API_VERSION
+
+        self.keepalived_mixin.update_vrrp_conf(
+            self.lb_mock, self.amphorae_network_config, ready_amphora_mock)
+
+        mock_templater.assert_not_called()
+        self.clients[API_VERSION].upload_vrrp_config.assert_not_called()
+
     def test_stop_vrrp_service(self):
 
         self.keepalived_mixin.stop_vrrp_service(self.lb_mock)
@@ -69,10 +98,21 @@ class TestVRRPRestDriver(base.TestCase):
 
     def test_start_vrrp_service(self):
 
-        self.keepalived_mixin.start_vrrp_service(self.lb_mock)
+        self.keepalived_mixin.start_vrrp_service(self.amphora_mock)
 
         self.clients[API_VERSION].start_vrrp.assert_called_once_with(
-            self.amphora_mock)
+            self.amphora_mock, timeout_dict=None)
 
+        # Test amphora not in AMPHORA_ALLOCATED state
+        self.clients[API_VERSION].start_vrrp.reset_mock()
+        ready_amphora_mock = mock.MagicMock()
+        ready_amphora_mock.id = uuidutils.generate_uuid()
+        ready_amphora_mock.status = constants.AMPHORA_READY
+        ready_amphora_mock.api_version = API_VERSION
+
+        self.keepalived_mixin.start_vrrp_service(ready_amphora_mock)
+
+        self.clients[API_VERSION].start_vrrp.assert_not_called()
+
     def test_reload_vrrp_service(self):
 
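
Both keepalived tests above now assert that amphorae not in the ALLOCATED state are skipped: for a `ready_amphora_mock` with status `AMPHORA_READY`, neither the config templater nor `upload_vrrp_config`/`start_vrrp` may be called. A guard of roughly this shape (a sketch, not the driver's literal code) is what the assertions imply:

    # Sketch: skip VRRP operations for amphorae that are not allocated to a
    # load balancer (e.g. spares still in AMPHORA_READY state).
    from octavia.common import constants

    def start_vrrp_service(self, amphora, timeout_dict=None):
        if amphora.status != constants.AMPHORA_ALLOCATED:
            return  # Nothing to start on a spare or booting amphora.
        self.clients[amphora.api_version].start_vrrp(
            amphora, timeout_dict=timeout_dict)
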
@@ -49,7 +49,7 @@ class TestNoopAmphoraLoadBalancerDriver(base.TestCase):
         self.listener.id = uuidutils.generate_uuid()
         self.listener.protocol_port = 80
         self.vip = data_models.Vip()
-        self.vip.ip_address = "10.0.0.1"
+        self.vip.ip_address = "192.51.100.1"
         self.amphora = data_models.Amphora()
         self.amphora.id = self.FAKE_UUID_1
         self.load_balancer = data_models.LoadBalancer(
@@ -152,3 +152,12 @@ class TestNoopAmphoraLoadBalancerDriver(base.TestCase):
                           'update_amphora_agent_config'),
                          self.driver.driver.amphoraconfig[(
                              self.amphora.id, self.agent_config)])
+
+    def test_get_interface_from_ip(self):
+        result = self.driver.get_interface_from_ip(self.amphora,
+                                                   '198.51.100.99')
+        self.assertEqual('noop0', result)
+
+        result = self.driver.get_interface_from_ip(self.amphora,
+                                                   '198.51.100.9')
+        self.assertIsNone(result)
@@ -11,7 +11,11 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+from unittest import mock
 
+from oslo_utils import uuidutils
+
+from octavia.common import constants
 import octavia.common.utils as utils
 import octavia.tests.unit.base as base
 
@@ -21,6 +25,14 @@ class TestConfig(base.TestCase):
     def test_get_hostname(self):
         self.assertNotEqual(utils.get_hostname(), '')
 
+    def test_is_ipv4(self):
+        self.assertTrue(utils.is_ipv4('192.0.2.10'))
+        self.assertTrue(utils.is_ipv4('169.254.0.10'))
+        self.assertTrue(utils.is_ipv4('0.0.0.0'))
+        self.assertFalse(utils.is_ipv4('::'))
+        self.assertFalse(utils.is_ipv4('2001:db8::1'))
+        self.assertFalse(utils.is_ipv4('fe80::225:90ff:fefb:53ad'))
+
     def test_is_ipv6(self):
         self.assertFalse(utils.is_ipv6('192.0.2.10'))
         self.assertFalse(utils.is_ipv6('169.254.0.10'))
@@ -104,3 +116,22 @@ class TestConfig(base.TestCase):
             ]
         for str, sha1 in str_to_sha1:
             self.assertEqual(sha1, utils.base64_sha1_string(str))
+
+    @mock.patch('stevedore.driver.DriverManager')
+    def test_get_amphora_driver(self, mock_stevedore_driver):
+        FAKE_AMP_DRIVER = 'fake_amp_drvr'
+        driver_mock = mock.MagicMock()
+        driver_mock.driver = FAKE_AMP_DRIVER
+        mock_stevedore_driver.return_value = driver_mock
+
+        result = utils.get_amphora_driver()
+
+        self.assertEqual(FAKE_AMP_DRIVER, result)
+
+    def test_get_vip_security_group_name(self):
+        FAKE_LB_ID = uuidutils.generate_uuid()
+        self.assertIsNone(utils.get_vip_security_group_name(None))
+
+        expected_sg_name = constants.VIP_SECURITY_GROUP_PREFIX + FAKE_LB_ID
+        self.assertEqual(expected_sg_name,
+                         utils.get_vip_security_group_name(FAKE_LB_ID))
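
`test_is_ipv4` above complements the existing `test_is_ipv6` coverage. The expected behavior matches what the standard library can provide directly; a sketch of an equivalent helper (Octavia's own implementation may differ):

    import ipaddress

    def is_ipv4(ip_address):
        """Return True only for valid IPv4 literals such as '192.0.2.10'."""
        try:
            return ipaddress.ip_address(ip_address).version == 4
        except ValueError:
            return False
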
@@ -443,10 +443,72 @@ class TestNovaClient(base.TestCase):
             server=self.compute_id, net_id=self.network_id, fixed_ip=None,
             port_id=None)
 
-    def test_attach_network_or_port_exception(self):
+    def test_attach_network_or_port_conflict_exception(self):
+        self.manager.manager.interface_attach.side_effect = (
+            nova_exceptions.Conflict('test_exception'))
+        interface_mock = mock.MagicMock()
+        interface_mock.id = self.port_id
+        bad_interface_mock = mock.MagicMock()
+        bad_interface_mock.id = uuidutils.generate_uuid()
+        self.manager.manager.interface_list.side_effect = [
+            [interface_mock], [bad_interface_mock], [], Exception('boom')]
+
+        # No port specified
+        self.assertRaises(exceptions.ComputeUnknownException,
+                          self.manager.attach_network_or_port,
+                          self.compute_id, self.network_id)
+
+        # Port already attached
+        result = self.manager.attach_network_or_port(self.compute_id,
+                                                     port_id=self.port_id)
+        self.assertEqual(interface_mock, result)
+
+        # Port not found
+        self.assertRaises(exceptions.ComputePortInUseException,
+                          self.manager.attach_network_or_port,
+                          self.compute_id, port_id=self.port_id)
+
+        # No ports attached
+        self.assertRaises(exceptions.ComputePortInUseException,
+                          self.manager.attach_network_or_port,
+                          self.compute_id, port_id=self.port_id)
+
+        # Get attached ports list exception
+        self.assertRaises(exceptions.ComputeUnknownException,
+                          self.manager.attach_network_or_port,
+                          self.compute_id, port_id=self.port_id)
+
+    def test_attach_network_or_port_general_not_found_exception(self):
         self.manager.manager.interface_attach.side_effect = [
             nova_exceptions.NotFound('test_exception')]
-        self.assertRaises(nova_exceptions.NotFound,
+        self.assertRaises(exceptions.NotFound,
+                          self.manager.attach_network_or_port,
+                          self.compute_id, self.network_id)
+
+    def test_attach_network_or_port_instance_not_found_exception(self):
+        self.manager.manager.interface_attach.side_effect = [
+            nova_exceptions.NotFound('Instance disappeared')]
+        self.assertRaises(exceptions.NotFound,
+                          self.manager.attach_network_or_port,
+                          self.compute_id, self.network_id)
+
+    def test_attach_network_or_port_network_not_found_exception(self):
+        self.manager.manager.interface_attach.side_effect = [
+            nova_exceptions.NotFound('Network disappeared')]
+        self.assertRaises(exceptions.NotFound,
+                          self.manager.attach_network_or_port,
+                          self.compute_id, self.network_id)
+
+    def test_attach_network_or_port_port_not_found_exception(self):
+        self.manager.manager.interface_attach.side_effect = [
+            nova_exceptions.NotFound('Port disappeared')]
+        self.assertRaises(exceptions.NotFound,
+                          self.manager.attach_network_or_port,
+                          self.compute_id, self.network_id)
+
+    def test_attach_network_or_port_unknown_exception(self):
+        self.manager.manager.interface_attach.side_effect = [Exception('boom')]
+        self.assertRaises(exceptions.ComputeUnknownException,
                           self.manager.attach_network_or_port,
                           self.compute_id, self.network_id)
 
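
The renamed conflict test encodes the new recovery behavior: when nova returns `Conflict` on interface attach, the compute driver is expected to list the instance's interfaces and treat the port as successfully attached if it is already there; otherwise it raises `ComputePortInUseException`, and any failure while listing (or a conflict with no port specified) becomes `ComputeUnknownException`. A sketch of that decision ladder, with stand-in exception classes (the real ones live in octavia.common.exceptions, and the real driver signature may differ):

    class Conflict(Exception):                 # stand-in for novaclient's Conflict
        pass

    class ComputePortInUseException(Exception):
        pass

    class ComputeUnknownException(Exception):
        pass

    def attach_network_or_port(nova, compute_id, network_id=None, port_id=None):
        """Sketch of the recovery ladder asserted by the conflict test."""
        try:
            return nova.interface_attach(server=compute_id, net_id=network_id,
                                         fixed_ip=None, port_id=port_id)
        except Conflict:
            if not port_id:
                # A conflict with no port to double-check is unrecoverable.
                raise ComputeUnknownException('no port to check after conflict')
            try:
                attached = nova.interface_list(server=compute_id)
            except Exception as e:
                raise ComputeUnknownException(str(e))
            for interface in attached:
                if interface.id == port_id:
                    return interface   # Port is already attached: success.
            raise ComputePortInUseException(port_id)
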
@@ -16,6 +16,7 @@ from unittest import mock
 
 from oslo_config import cfg
 from oslo_config import fixture as oslo_fixture
+from oslo_utils import uuidutils
 from taskflow.patterns import linear_flow as flow
 
 from octavia.common import constants
@@ -42,6 +43,7 @@ class TestAmphoraFlows(base.TestCase):
         self.amp1 = data_models.Amphora(id=1)
         self.amp2 = data_models.Amphora(id=2)
         self.amp3 = data_models.Amphora(id=3, status=constants.DELETED)
+        self.amp4 = data_models.Amphora(id=uuidutils.generate_uuid())
         self.lb = data_models.LoadBalancer(
             id=4, amphorae=[self.amp1, self.amp2, self.amp3])
 
@@ -57,7 +59,7 @@ class TestAmphoraFlows(base.TestCase):
         self.assertIn(constants.SERVER_PEM, amp_flow.provides)
 
         self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(3, len(amp_flow.requires))
+        self.assertEqual(4, len(amp_flow.requires))
 
     def test_get_create_amphora_flow_cert(self, mock_get_net_driver):
         self.AmpFlow = amphora_flows.AmphoraFlows()
@@ -71,7 +73,7 @@ class TestAmphoraFlows(base.TestCase):
         self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
 
         self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(3, len(amp_flow.requires))
+        self.assertEqual(4, len(amp_flow.requires))
 
     def test_get_create_amphora_for_lb_flow(self, mock_get_net_driver):
 
@@ -89,7 +91,7 @@ class TestAmphoraFlows(base.TestCase):
         self.assertIn(constants.SERVER_PEM, amp_flow.provides)
 
         self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(4, len(amp_flow.requires))
+        self.assertEqual(5, len(amp_flow.requires))
 
     def test_get_cert_create_amphora_for_lb_flow(self, mock_get_net_driver):
 
@@ -109,7 +111,7 @@ class TestAmphoraFlows(base.TestCase):
         self.assertIn(constants.SERVER_PEM, amp_flow.provides)
 
         self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(4, len(amp_flow.requires))
+        self.assertEqual(5, len(amp_flow.requires))
 
     def test_get_cert_master_create_amphora_for_lb_flow(
             self, mock_get_net_driver):
@@ -130,7 +132,7 @@ class TestAmphoraFlows(base.TestCase):
         self.assertIn(constants.SERVER_PEM, amp_flow.provides)
 
         self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(4, len(amp_flow.requires))
+        self.assertEqual(5, len(amp_flow.requires))
 
     def test_get_cert_master_rest_anti_affinity_create_amphora_for_lb_flow(
             self, mock_get_net_driver):
@@ -143,7 +145,6 @@ class TestAmphoraFlows(base.TestCase):
 
         self.assertIsInstance(amp_flow, flow.Flow)
         self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
-        self.assertIn(constants.SERVER_GROUP_ID, amp_flow.requires)
         self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
         self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
         self.assertIn(constants.SERVER_PEM, amp_flow.provides)
@@ -170,7 +171,7 @@ class TestAmphoraFlows(base.TestCase):
         self.assertIn(constants.SERVER_PEM, amp_flow.provides)
 
         self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(4, len(amp_flow.requires))
+        self.assertEqual(5, len(amp_flow.requires))
 
     def test_get_cert_bogus_create_amphora_for_lb_flow(
             self, mock_get_net_driver):
@@ -190,7 +191,7 @@ class TestAmphoraFlows(base.TestCase):
         self.assertIn(constants.SERVER_PEM, amp_flow.provides)
 
         self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(4, len(amp_flow.requires))
+        self.assertEqual(5, len(amp_flow.requires))
 
     def test_get_cert_backup_rest_anti_affinity_create_amphora_for_lb_flow(
             self, mock_get_net_driver):
@@ -202,7 +203,6 @@ class TestAmphoraFlows(base.TestCase):
 
         self.assertIsInstance(amp_flow, flow.Flow)
         self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
-        self.assertIn(constants.SERVER_GROUP_ID, amp_flow.requires)
         self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
         self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
         self.assertIn(constants.SERVER_PEM, amp_flow.provides)
@@ -213,14 +213,13 @@ class TestAmphoraFlows(base.TestCase):
 
     def test_get_delete_amphora_flow(self, mock_get_net_driver):
 
-        amp_flow = self.AmpFlow.get_delete_amphora_flow()
+        amp_flow = self.AmpFlow.get_delete_amphora_flow(self.amp4)
 
         self.assertIsInstance(amp_flow, flow.Flow)
 
-        self.assertIn(constants.AMPHORA, amp_flow.requires)
-
+        # This flow injects the required data at flow compile time.
         self.assertEqual(0, len(amp_flow.provides))
-        self.assertEqual(1, len(amp_flow.requires))
+        self.assertEqual(0, len(amp_flow.requires))
 
     def test_allocate_amp_to_lb_decider(self, mock_get_net_driver):
         history = mock.MagicMock()
@@ -240,98 +239,117 @@ class TestAmphoraFlows(base.TestCase):
         result = self.AmpFlow._create_new_amp_for_lb_decider(history)
         self.assertFalse(result)
 
-    def test_get_failover_flow_allocated(self, mock_get_net_driver):
+    def test_get_failover_flow_act_stdby(self, mock_get_net_driver):
+        failed_amphora = data_models.Amphora(
+            id=uuidutils.generate_uuid(), role=constants.ROLE_MASTER,
+            load_balancer_id=uuidutils.generate_uuid())
 
-        amp_flow = self.AmpFlow.get_failover_flow(
-            load_balancer=self.lb)
+        amp_flow = self.AmpFlow.get_failover_amphora_flow(
+            failed_amphora, 2)
 
         self.assertIsInstance(amp_flow, flow.Flow)
 
-        self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
+        self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
+        self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
+        self.assertIn(constants.FLAVOR, amp_flow.requires)
+        self.assertIn(constants.LOADBALANCER, amp_flow.requires)
         self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
+        self.assertIn(constants.VIP, amp_flow.requires)
 
-        self.assertIn(constants.AMP_DATA, amp_flow.provides)
+        self.assertIn(constants.ADDED_PORTS, amp_flow.provides)
+        self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides)
         self.assertIn(constants.AMPHORA, amp_flow.provides)
         self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
+        self.assertIn(constants.AMPHORAE, amp_flow.provides)
         self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
+        self.assertIn(constants.BASE_PORT, amp_flow.provides)
         self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
         self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
-        self.assertIn(constants.LISTENERS, amp_flow.provides)
+        self.assertIn(constants.DELTA, amp_flow.provides)
         self.assertIn(constants.LOADBALANCER, amp_flow.provides)
+        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
+        self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
 
-        self.assertEqual(5, len(amp_flow.requires))
-        self.assertEqual(12, len(amp_flow.provides))
+        self.assertEqual(7, len(amp_flow.requires))
+        self.assertEqual(13, len(amp_flow.provides))
 
-        amp_flow = self.AmpFlow.get_failover_flow(
-            role=constants.ROLE_MASTER, load_balancer=self.lb)
+    def test_get_failover_flow_standalone(self, mock_get_net_driver):
+        failed_amphora = data_models.Amphora(
+            id=uuidutils.generate_uuid(), role=constants.ROLE_STANDALONE,
+            load_balancer_id=uuidutils.generate_uuid(), vrrp_ip='2001:3b8::32')
+
+        amp_flow = self.AmpFlow.get_failover_amphora_flow(
+            failed_amphora, 1)
 
         self.assertIsInstance(amp_flow, flow.Flow)
 
-        self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
+        self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
+        self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
+        self.assertIn(constants.FLAVOR, amp_flow.requires)
+        self.assertIn(constants.LOADBALANCER, amp_flow.requires)
         self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
+        self.assertIn(constants.VIP, amp_flow.requires)
 
-        self.assertIn(constants.AMP_DATA, amp_flow.provides)
+        self.assertIn(constants.ADDED_PORTS, amp_flow.provides)
         self.assertIn(constants.AMPHORA, amp_flow.provides)
         self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
+        self.assertIn(constants.AMPHORAE, amp_flow.provides)
         self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
+        self.assertIn(constants.BASE_PORT, amp_flow.provides)
         self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
         self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
-        self.assertIn(constants.LISTENERS, amp_flow.provides)
+        self.assertIn(constants.DELTA, amp_flow.provides)
         self.assertIn(constants.LOADBALANCER, amp_flow.provides)
+        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
+        self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
 
-        self.assertEqual(5, len(amp_flow.requires))
+        self.assertEqual(7, len(amp_flow.requires))
         self.assertEqual(12, len(amp_flow.provides))
 
-        amp_flow = self.AmpFlow.get_failover_flow(
-            role=constants.ROLE_BACKUP, load_balancer=self.lb)
+    def test_get_failover_flow_bogus_role(self, mock_get_net_driver):
+        failed_amphora = data_models.Amphora(id=uuidutils.generate_uuid(),
+                                             role='bogus')
+
+        amp_flow = self.AmpFlow.get_failover_amphora_flow(
+            failed_amphora, 1)
 
         self.assertIsInstance(amp_flow, flow.Flow)
 
-        self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
+        # TODO(johnsom) Uncomment after amphora failover builds a replacement
+        # amphora.
+        # self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
+        # self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
+        # self.assertIn(constants.FLAVOR, amp_flow.requires)
+        # self.assertEqual(5, len(amp_flow.requires))
         self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
 
-        self.assertIn(constants.AMP_DATA, amp_flow.provides)
-        self.assertIn(constants.AMPHORA, amp_flow.provides)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
-        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
-        self.assertIn(constants.LISTENERS, amp_flow.provides)
-        self.assertIn(constants.LOADBALANCER, amp_flow.provides)
+        # self.assertIn(constants.AMPHORA, amp_flow.provides)
+        # self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
+        # self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
+        # self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
+        # self.assertIn(constants.SERVER_PEM, amp_flow.provides)
+        # self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
+        # self.assertEqual(6, len(amp_flow.provides))
 
-        self.assertEqual(5, len(amp_flow.requires))
-        self.assertEqual(12, len(amp_flow.provides))
+        self.assertEqual(1, len(amp_flow.requires))
+        self.assertEqual(1, len(amp_flow.provides))
 
-        amp_flow = self.AmpFlow.get_failover_flow(
-            role='BOGUSROLE', load_balancer=self.lb)
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
-        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
-
-        self.assertIn(constants.AMP_DATA, amp_flow.provides)
-        self.assertIn(constants.AMPHORA, amp_flow.provides)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
-        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
-        self.assertIn(constants.LISTENERS, amp_flow.provides)
-        self.assertIn(constants.LOADBALANCER, amp_flow.provides)
-
-        self.assertEqual(5, len(amp_flow.requires))
-        self.assertEqual(12, len(amp_flow.provides))
-
     def test_get_failover_flow_spare(self, mock_get_net_driver):
 
-        amp_flow = self.AmpFlow.get_failover_flow()
+        amp_flow = self.AmpFlow.get_failover_amphora_flow(self.amp4, 0)
 
         self.assertIsInstance(amp_flow, flow.Flow)
 
-        self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
+        # TODO(johnsom) Uncomment after amphora failover builds a replacement
+        # amphora.
+        # self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
+        # self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
+        # self.assertEqual(5, len(amp_flow.requires))
+        # self.assertEqual(6, len(amp_flow.provides))
+        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
 
         self.assertEqual(1, len(amp_flow.requires))
-        self.assertEqual(0, len(amp_flow.provides))
+        self.assertEqual(1, len(amp_flow.provides))
 
     def test_cert_rotate_amphora_flow(self, mock_get_net_driver):
         self.AmpFlow = amphora_flows.AmphoraFlows()
@@ -350,12 +368,30 @@ class TestAmphoraFlows(base.TestCase):
 
         self.assertIsInstance(vrrp_subflow, flow.Flow)
 
-        self.assertIn(constants.LOADBALANCER, vrrp_subflow.provides)
+        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides)
+        self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides)
 
-        self.assertIn(constants.LOADBALANCER, vrrp_subflow.requires)
+        self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires)
+        self.assertIn(constants.AMPHORAE, vrrp_subflow.requires)
 
         self.assertEqual(2, len(vrrp_subflow.provides))
-        self.assertEqual(1, len(vrrp_subflow.requires))
+        self.assertEqual(2, len(vrrp_subflow.requires))
+
+    def test_get_vrrp_subflow_dont_create_vrrp_group(
+            self, mock_get_net_driver):
+        vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123',
+                                                     create_vrrp_group=False)
+
+        self.assertIsInstance(vrrp_subflow, flow.Flow)
+
+        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides)
+        self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides)
+
+        self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires)
+        self.assertIn(constants.AMPHORAE, vrrp_subflow.requires)
+
+        self.assertEqual(2, len(vrrp_subflow.provides))
+        self.assertEqual(2, len(vrrp_subflow.requires))
 
     def test_get_post_map_lb_subflow(self, mock_get_net_driver):
 
@@ -420,3 +456,66 @@ class TestAmphoraFlows(base.TestCase):
 
         self.assertEqual(2, len(amp_flow.requires))
         self.assertEqual(0, len(amp_flow.provides))
+
+    def test_get_amphora_for_lb_failover_flow_single(self,
+                                                     mock_get_net_driver):
+        FAILED_PORT_ID = uuidutils.generate_uuid()
+        TEST_PREFIX = 'test_prefix'
+
+        get_amp_flow = self.AmpFlow.get_amphora_for_lb_failover_subflow(
+            TEST_PREFIX, role=constants.ROLE_STANDALONE,
+            failed_amp_vrrp_port_id=FAILED_PORT_ID, is_vrrp_ipv6=True)
+
+        self.assertIsInstance(get_amp_flow, flow.Flow)
+
+        self.assertIn(constants.AVAILABILITY_ZONE, get_amp_flow.requires)
+        self.assertIn(constants.BUILD_TYPE_PRIORITY, get_amp_flow.requires)
+        self.assertIn(constants.FLAVOR, get_amp_flow.requires)
+        self.assertIn(constants.LOADBALANCER, get_amp_flow.requires)
+        self.assertIn(constants.LOADBALANCER_ID, get_amp_flow.requires)
+        self.assertIn(constants.VIP, get_amp_flow.requires)
+        self.assertIn(constants.VIP_SG_ID, get_amp_flow.requires)
+
+        self.assertIn(constants.ADDED_PORTS, get_amp_flow.provides)
+        self.assertIn(constants.AMPHORA, get_amp_flow.provides)
+        self.assertIn(constants.AMPHORA_ID, get_amp_flow.provides)
+        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, get_amp_flow.provides)
+        self.assertIn(constants.BASE_PORT, get_amp_flow.provides)
+        self.assertIn(constants.COMPUTE_ID, get_amp_flow.provides)
+        self.assertIn(constants.COMPUTE_OBJ, get_amp_flow.provides)
+        self.assertIn(constants.DELTA, get_amp_flow.provides)
+        self.assertIn(constants.SERVER_PEM, get_amp_flow.provides)
+
+        self.assertEqual(8, len(get_amp_flow.requires), get_amp_flow.requires)
+        self.assertEqual(9, len(get_amp_flow.provides), get_amp_flow.provides)
+
+    def test_get_amphora_for_lb_failover_flow_act_stdby(self,
+                                                        mock_get_net_driver):
+        TEST_PREFIX = 'test_prefix'
+
+        get_amp_flow = self.AmpFlow.get_amphora_for_lb_failover_subflow(
+            TEST_PREFIX, role=constants.ROLE_MASTER,
+            is_spare=False)
+
+        self.assertIsInstance(get_amp_flow, flow.Flow)
+
+        self.assertIn(constants.AVAILABILITY_ZONE, get_amp_flow.requires)
+        self.assertIn(constants.BUILD_TYPE_PRIORITY, get_amp_flow.requires)
+        self.assertIn(constants.FLAVOR, get_amp_flow.requires)
+        self.assertIn(constants.LOADBALANCER, get_amp_flow.requires)
+        self.assertIn(constants.LOADBALANCER_ID, get_amp_flow.requires)
+        self.assertIn(constants.VIP, get_amp_flow.requires)
+        self.assertIn(constants.VIP_SG_ID, get_amp_flow.requires)
+
+        self.assertIn(constants.ADDED_PORTS, get_amp_flow.provides)
+        self.assertIn(constants.AMPHORA, get_amp_flow.provides)
+        self.assertIn(constants.AMPHORA_ID, get_amp_flow.provides)
+        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, get_amp_flow.provides)
+        self.assertIn(constants.BASE_PORT, get_amp_flow.provides)
+        self.assertIn(constants.COMPUTE_ID, get_amp_flow.provides)
+        self.assertIn(constants.COMPUTE_OBJ, get_amp_flow.provides)
+        self.assertIn(constants.DELTA, get_amp_flow.provides)
+        self.assertIn(constants.SERVER_PEM, get_amp_flow.provides)
+
+        self.assertEqual(8, len(get_amp_flow.requires), get_amp_flow.requires)
+        self.assertEqual(9, len(get_amp_flow.provides), get_amp_flow.provides)
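
Most of these flow tests assert on `requires` and `provides`, which taskflow computes from the tasks added to a flow: anything a task's `execute` needs that no earlier task produces becomes a flow requirement, and every task output becomes a flow output. A tiny self-contained example of that mechanism (the task names here are hypothetical, not Octavia's):

    from taskflow.patterns import linear_flow
    from taskflow import task


    class AllocateVIP(task.Task):
        default_provides = 'vip'

        def execute(self, loadbalancer_id):
            return 'vip-for-%s' % loadbalancer_id


    class PlugVIP(task.Task):
        def execute(self, vip, amphora_id):
            pass


    f = linear_flow.Flow('mini-failover')
    f.add(AllocateVIP(), PlugVIP())
    # 'loadbalancer_id' and 'amphora_id' must come from flow storage;
    # 'vip' is produced by AllocateVIP, so it appears only in provides.
    print(sorted(f.requires))  # ['amphora_id', 'loadbalancer_id']
    print(sorted(f.provides))  # ['vip']
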
@@ -16,6 +16,7 @@ from unittest import mock
 
 from oslo_config import cfg
 from oslo_config import fixture as oslo_fixture
+from oslo_utils import uuidutils
 from taskflow.patterns import linear_flow as flow
 
 from octavia.common import constants
@@ -156,7 +157,7 @@ class TestLoadBalancerFlows(base.TestCase):
         self.assertIn(constants.UPDATE_DICT, amp_flow.requires)
         self.assertIn(constants.LOADBALANCER, amp_flow.provides)
 
-        self.assertEqual(2, len(amp_flow.provides))
+        self.assertEqual(4, len(amp_flow.provides))
         self.assertEqual(2, len(amp_flow.requires))
 
         # Test mark_active=False
@@ -169,7 +170,7 @@ class TestLoadBalancerFlows(base.TestCase):
         self.assertIn(constants.UPDATE_DICT, amp_flow.requires)
         self.assertIn(constants.LOADBALANCER, amp_flow.provides)
 
-        self.assertEqual(2, len(amp_flow.provides))
+        self.assertEqual(4, len(amp_flow.provides))
         self.assertEqual(2, len(amp_flow.requires))
 
     def test_get_create_load_balancer_flows_single_listeners(
@@ -195,7 +196,7 @@ class TestLoadBalancerFlows(base.TestCase):
         self.assertIn(constants.AMP_DATA, create_flow.provides)
         self.assertIn(constants.AMPHORA_NETWORK_CONFIG, create_flow.provides)
 
-        self.assertEqual(5, len(create_flow.requires))
+        self.assertEqual(6, len(create_flow.requires))
         self.assertEqual(13, len(create_flow.provides),
                          create_flow.provides)
 
@@ -223,6 +224,231 @@ class TestLoadBalancerFlows(base.TestCase):
         self.assertIn(constants.AMPHORAE_NETWORK_CONFIG,
                       create_flow.provides)
 
-        self.assertEqual(5, len(create_flow.requires))
-        self.assertEqual(14, len(create_flow.provides),
+        self.assertEqual(6, len(create_flow.requires))
+        self.assertEqual(16, len(create_flow.provides),
                          create_flow.provides)
 
+    def _test_get_failover_LB_flow_single(self, amphorae):
+        lb_mock = mock.MagicMock()
+        lb_mock.id = uuidutils.generate_uuid()
+        lb_mock.topology = constants.TOPOLOGY_SINGLE
+
+        failover_flow = self.LBFlow.get_failover_LB_flow(amphorae, lb_mock)
+
+        self.assertIsInstance(failover_flow, flow.Flow)
+
+        self.assertIn(constants.AVAILABILITY_ZONE, failover_flow.requires)
+        self.assertIn(constants.BUILD_TYPE_PRIORITY, failover_flow.requires)
+        self.assertIn(constants.FLAVOR, failover_flow.requires)
+        self.assertIn(constants.LOADBALANCER, failover_flow.requires)
+        self.assertIn(constants.LOADBALANCER_ID, failover_flow.requires)
+
+        self.assertIn(constants.ADDED_PORTS, failover_flow.provides)
+        self.assertIn(constants.AMPHORA, failover_flow.provides)
+        self.assertIn(constants.AMPHORA_ID, failover_flow.provides)
+        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG,
+                      failover_flow.provides)
+        self.assertIn(constants.BASE_PORT, failover_flow.provides)
+        self.assertIn(constants.COMPUTE_ID, failover_flow.provides)
+        self.assertIn(constants.COMPUTE_OBJ, failover_flow.provides)
+        self.assertIn(constants.DELTA, failover_flow.provides)
+        self.assertIn(constants.LOADBALANCER, failover_flow.provides)
+        self.assertIn(constants.SERVER_PEM, failover_flow.provides)
+        self.assertIn(constants.VIP, failover_flow.provides)
+        self.assertIn(constants.VIP_SG_ID, failover_flow.provides)
+
+        self.assertEqual(6, len(failover_flow.requires),
+                         failover_flow.requires)
+        self.assertEqual(12, len(failover_flow.provides),
+                         failover_flow.provides)
+
+    def test_get_failover_LB_flow_no_amps_single(self, mock_get_net_driver):
+        self._test_get_failover_LB_flow_single([])
+
+    def test_get_failover_LB_flow_one_amp_single(self, mock_get_net_driver):
+        amphora_mock = mock.MagicMock()
+        amphora_mock.role = constants.ROLE_STANDALONE
+        amphora_mock.lb_network_id = uuidutils.generate_uuid()
+        amphora_mock.compute_id = uuidutils.generate_uuid()
+        amphora_mock.vrrp_port_id = None
+        amphora_mock.vrrp_ip = None
+
+        self._test_get_failover_LB_flow_single([amphora_mock])
+
+    def test_get_failover_LB_flow_one_spare_amp_single(self,
+                                                       mock_get_net_driver):
+        amphora_mock = mock.MagicMock()
+        amphora_mock.role = None
+        amphora_mock.lb_network_id = uuidutils.generate_uuid()
+        amphora_mock.compute_id = uuidutils.generate_uuid()
+        amphora_mock.vrrp_port_id = None
+        amphora_mock.vrrp_ip = None
+
+        self._test_get_failover_LB_flow_single([amphora_mock])
+
+    def test_get_failover_LB_flow_one_bogus_amp_single(self,
+                                                       mock_get_net_driver):
+        amphora_mock = mock.MagicMock()
+        amphora_mock.role = 'bogus'
+        amphora_mock.lb_network_id = uuidutils.generate_uuid()
+        amphora_mock.compute_id = uuidutils.generate_uuid()
+        amphora_mock.vrrp_port_id = None
+        amphora_mock.vrrp_ip = None
+
+        self._test_get_failover_LB_flow_single([amphora_mock])
+
+    def test_get_failover_LB_flow_two_amp_single(self, mock_get_net_driver):
+        amphora_mock = mock.MagicMock()
+        amphora2_mock = mock.MagicMock()
+        amphora2_mock.role = constants.ROLE_STANDALONE
+        amphora2_mock.lb_network_id = uuidutils.generate_uuid()
+        amphora2_mock.compute_id = uuidutils.generate_uuid()
+        amphora2_mock.vrrp_port_id = None
+        amphora2_mock.vrrp_ip = None
+
+        self._test_get_failover_LB_flow_single([amphora_mock, amphora2_mock])
+
+    def _test_get_failover_LB_flow_no_amps_act_stdby(self, amphorae):
+        lb_mock = mock.MagicMock()
+        lb_mock.id = uuidutils.generate_uuid()
+        lb_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY
+
+        failover_flow = self.LBFlow.get_failover_LB_flow(amphorae, lb_mock)
+
+        self.assertIsInstance(failover_flow, flow.Flow)
+
+        self.assertIn(constants.AVAILABILITY_ZONE, failover_flow.requires)
+        self.assertIn(constants.BUILD_TYPE_PRIORITY, failover_flow.requires)
+        self.assertIn(constants.FLAVOR, failover_flow.requires)
+        self.assertIn(constants.LOADBALANCER, failover_flow.requires)
+        self.assertIn(constants.LOADBALANCER_ID, failover_flow.requires)
+
+        self.assertIn(constants.ADDED_PORTS, failover_flow.provides)
+        self.assertIn(constants.AMP_VRRP_INT, failover_flow.provides)
+        self.assertIn(constants.AMPHORA, failover_flow.provides)
+        self.assertIn(constants.AMPHORA_ID, failover_flow.provides)
+        self.assertIn(constants.AMPHORAE, failover_flow.provides)
+        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG,
+                      failover_flow.provides)
+        self.assertIn(constants.BASE_PORT, failover_flow.provides)
+        self.assertIn(constants.COMPUTE_ID, failover_flow.provides)
+        self.assertIn(constants.COMPUTE_OBJ, failover_flow.provides)
+        self.assertIn(constants.DELTA, failover_flow.provides)
+        self.assertIn(constants.FIRST_AMP_NETWORK_CONFIGS,
+                      failover_flow.provides)
+        self.assertIn(constants.FIRST_AMP_VRRP_INTERFACE,
+                      failover_flow.provides)
+        self.assertIn(constants.LOADBALANCER, failover_flow.provides)
+        self.assertIn(constants.SERVER_PEM, failover_flow.provides)
+        self.assertIn(constants.VIP, failover_flow.provides)
+        self.assertIn(constants.VIP_SG_ID, failover_flow.provides)
+
+        self.assertEqual(6, len(failover_flow.requires),
+                         failover_flow.requires)
+        self.assertEqual(16, len(failover_flow.provides),
+                         failover_flow.provides)
+
+    def test_get_failover_LB_flow_no_amps_act_stdby(self, mock_get_net_driver):
+        self._test_get_failover_LB_flow_no_amps_act_stdby([])
+
+    def test_get_failover_LB_flow_one_amps_act_stdby(self, amphorae):
+        amphora_mock = mock.MagicMock()
+        amphora_mock.role = constants.ROLE_MASTER
+        amphora_mock.lb_network_id = uuidutils.generate_uuid()
+        amphora_mock.compute_id = uuidutils.generate_uuid()
+        amphora_mock.vrrp_port_id = None
+        amphora_mock.vrrp_ip = None
+
+        self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock])
+
+    def test_get_failover_LB_flow_two_amps_act_stdby(self,
+                                                     mock_get_net_driver):
+        amphora_mock = mock.MagicMock()
+        amphora_mock.role = constants.ROLE_MASTER
+        amphora_mock.lb_network_id = uuidutils.generate_uuid()
+        amphora_mock.compute_id = uuidutils.generate_uuid()
+        amphora_mock.vrrp_port_id = uuidutils.generate_uuid()
+        amphora_mock.vrrp_ip = '192.0.2.46'
+        amphora2_mock = mock.MagicMock()
+        amphora2_mock.role = constants.ROLE_BACKUP
+        amphora2_mock.lb_network_id = uuidutils.generate_uuid()
+        amphora2_mock.compute_id = uuidutils.generate_uuid()
+        amphora2_mock.vrrp_port_id = uuidutils.generate_uuid()
+        amphora2_mock.vrrp_ip = '2001:db8::46'
+
+        self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock,
+                                                           amphora2_mock])
+
+    def test_get_failover_LB_flow_three_amps_act_stdby(self,
+                                                       mock_get_net_driver):
+        amphora_mock = mock.MagicMock()
+        amphora_mock.role = constants.ROLE_MASTER
+        amphora_mock.lb_network_id = uuidutils.generate_uuid()
+        amphora_mock.compute_id = uuidutils.generate_uuid()
+        amphora_mock.vrrp_port_id = uuidutils.generate_uuid()
+        amphora_mock.vrrp_ip = '192.0.2.46'
+        amphora2_mock = mock.MagicMock()
+        amphora2_mock.role = constants.ROLE_BACKUP
+        amphora2_mock.lb_network_id = uuidutils.generate_uuid()
+        amphora2_mock.compute_id = uuidutils.generate_uuid()
+        amphora2_mock.vrrp_port_id = uuidutils.generate_uuid()
+        amphora2_mock.vrrp_ip = '2001:db8::46'
+        amphora3_mock = mock.MagicMock()
+        amphora3_mock.vrrp_ip = None
+
+        self._test_get_failover_LB_flow_no_amps_act_stdby(
+            [amphora_mock, amphora2_mock, amphora3_mock])
+
+    def test_get_failover_LB_flow_two_amps_bogus_act_stdby(
+            self, mock_get_net_driver):
+        amphora_mock = mock.MagicMock()
+        amphora_mock.role = 'bogus'
+        amphora_mock.lb_network_id = uuidutils.generate_uuid()
+        amphora_mock.compute_id = uuidutils.generate_uuid()
+        amphora_mock.vrrp_port_id = uuidutils.generate_uuid()
+        amphora_mock.vrrp_ip = '192.0.2.46'
+        amphora2_mock = mock.MagicMock()
+        amphora2_mock.role = constants.ROLE_MASTER
+        amphora2_mock.lb_network_id = uuidutils.generate_uuid()
+        amphora2_mock.compute_id = uuidutils.generate_uuid()
+        amphora2_mock.vrrp_port_id = uuidutils.generate_uuid()
+        amphora2_mock.vrrp_ip = '2001:db8::46'
+
+        self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock,
+                                                           amphora2_mock])
+
+    def test_get_failover_LB_flow_two_amps_spare_act_stdby(
+            self, mock_get_net_driver):
+        amphora_mock = mock.MagicMock()
+        amphora_mock.role = None
+        amphora_mock.lb_network_id = uuidutils.generate_uuid()
+        amphora_mock.compute_id = uuidutils.generate_uuid()
+        amphora_mock.vrrp_port_id = uuidutils.generate_uuid()
+        amphora_mock.vrrp_ip = '192.0.2.46'
+        amphora2_mock = mock.MagicMock()
+        amphora2_mock.role = constants.ROLE_MASTER
+        amphora2_mock.lb_network_id = uuidutils.generate_uuid()
+        amphora2_mock.compute_id = uuidutils.generate_uuid()
+        amphora2_mock.vrrp_port_id = uuidutils.generate_uuid()
+        amphora2_mock.vrrp_ip = '2001:db8::46'
+
+        self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock,
+                                                           amphora2_mock])
+
+    def test_get_failover_LB_flow_two_amps_standalone_act_stdby(
+            self, mock_get_net_driver):
+        amphora_mock = mock.MagicMock()
+        amphora_mock.role = constants.ROLE_STANDALONE
+        amphora_mock.lb_network_id = uuidutils.generate_uuid()
+        amphora_mock.compute_id = uuidutils.generate_uuid()
+        amphora_mock.vrrp_port_id = uuidutils.generate_uuid()
+        amphora_mock.vrrp_ip = '192.0.2.46'
+        amphora2_mock = mock.MagicMock()
+        amphora2_mock.role = constants.ROLE_MASTER
+        amphora2_mock.lb_network_id = uuidutils.generate_uuid()
+        amphora2_mock.compute_id = uuidutils.generate_uuid()
+        amphora2_mock.vrrp_port_id = uuidutils.generate_uuid()
+        amphora2_mock.vrrp_ip = '2001:db8::46'
+
+        self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock,
+                                                           amphora2_mock])
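
These load-balancer failover tests are parameterized over topology: `get_failover_LB_flow` is expected to build a different flow for `TOPOLOGY_SINGLE` than for `TOPOLOGY_ACTIVE_STANDBY` (the latter additionally provides `AMPHORAE`, `AMP_VRRP_INT`, and the `FIRST_AMP_*` values), and it must cope with zero, one, or many amphorae, including spare and bogus-role entries. A structural sketch of that dispatch, using hypothetical helper subflow names:

    # Sketch: _single_failover_subflow / _act_stdby_failover_subflow are
    # hypothetical names; the point is the topology dispatch the tests pin down.
    from octavia.common import constants

    def get_failover_LB_flow(self, amphorae, load_balancer):
        if load_balancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
            # Replaces both members of the pair and wires up the VRRP data
            # (AMPHORAE, AMP_VRRP_INT, FIRST_AMP_* providers).
            return self._act_stdby_failover_subflow(amphorae, load_balancer)
        # TOPOLOGY_SINGLE: rebuild the one amphora, whatever state the old
        # one is in (spare, bogus role, or missing entirely).
        return self._single_failover_subflow(amphorae, load_balancer)
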
@@ -40,6 +40,7 @@ FAKE_CONFIG_FILE = 'fake config file'
 _amphora_mock = mock.MagicMock()
 _amphora_mock.id = AMP_ID
 _amphora_mock.status = constants.AMPHORA_ALLOCATED
+_amphora_mock.vrrp_ip = '198.51.100.65'
 _load_balancer_mock = mock.MagicMock()
 _load_balancer_mock.id = LB_ID
 _listener_mock = mock.MagicMock()
@@ -76,33 +77,52 @@ class TestAmphoraDriverTasks(base.TestCase):
             active_connection_rety_interval=CONN_RETRY_INTERVAL)
         conf.config(group="controller_worker",
                     loadbalancer_topology=constants.TOPOLOGY_SINGLE)
+        self.timeout_dict = {constants.REQ_CONN_TIMEOUT: 1,
+                             constants.REQ_READ_TIMEOUT: 2,
+                             constants.CONN_MAX_RETRIES: 3,
+                             constants.CONN_RETRY_INTERVAL: 4}
         super(TestAmphoraDriverTasks, self).setUp()

-    def test_amp_listener_update(self,
+    def test_amp_listeners_update(self,
                                   mock_driver,
                                   mock_generate_uuid,
                                   mock_log,
                                   mock_get_session,
                                   mock_listener_repo_get,
                                   mock_listener_repo_update,
                                   mock_amphora_repo_update):

-        timeout_dict = {constants.REQ_CONN_TIMEOUT: 1,
-                        constants.REQ_READ_TIMEOUT: 2,
-                        constants.CONN_MAX_RETRIES: 3,
-                        constants.CONN_RETRY_INTERVAL: 4}
-
         amp_list_update_obj = amphora_driver_tasks.AmpListenersUpdate()
-        amp_list_update_obj.execute(_load_balancer_mock, 0,
-                                    [_amphora_mock], timeout_dict)
+        amp_list_update_obj.execute(_load_balancer_mock, _amphora_mock,
+                                    self.timeout_dict)

         mock_driver.update_amphora_listeners.assert_called_once_with(
-            _load_balancer_mock, _amphora_mock, timeout_dict)
+            _load_balancer_mock, _amphora_mock, self.timeout_dict)
+
+        mock_driver.update_amphora_listeners.side_effect = Exception('boom')
+
+        amp_list_update_obj.execute(_load_balancer_mock, _amphora_mock,
+                                    self.timeout_dict)
+
+        mock_amphora_repo_update.assert_called_once_with(
+            _session_mock, AMP_ID, status=constants.ERROR)
+
+    def test_amphorae_listeners_update(
+            self, mock_driver, mock_generate_uuid, mock_log, mock_get_session,
+            mock_listener_repo_get, mock_listener_repo_update,
+            mock_amphora_repo_update):
+
+        amp_list_update_obj = amphora_driver_tasks.AmphoraIndexListenerUpdate()
+        amp_list_update_obj.execute(_load_balancer_mock, 0,
+                                    [_amphora_mock], self.timeout_dict)
+
+        mock_driver.update_amphora_listeners.assert_called_once_with(
+            _load_balancer_mock, _amphora_mock, self.timeout_dict)

         mock_driver.update_amphora_listeners.side_effect = Exception('boom')

         amp_list_update_obj.execute(_load_balancer_mock, 0,
-                                    [_amphora_mock], timeout_dict)
+                                    [_amphora_mock], self.timeout_dict)

         mock_amphora_repo_update.assert_called_once_with(
             _session_mock, AMP_ID, status=constants.ERROR)
@@ -168,6 +188,36 @@ class TestAmphoraDriverTasks(base.TestCase):
         self.assertEqual(2, repo.ListenerRepository.update.call_count)
         self.assertIsNone(amp)

+    @mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
+                'mark_listener_prov_status_error')
+    def test_amphora_index_listeners_reload(
+            self, mock_prov_status_error, mock_driver, mock_generate_uuid,
+            mock_log, mock_get_session, mock_listener_repo_get,
+            mock_listener_repo_update, mock_amphora_repo_update):
+        amphora_mock = mock.MagicMock()
+        listeners_reload_obj = (
+            amphora_driver_tasks.AmphoraIndexListenersReload())
+        mock_lb = mock.MagicMock()
+        mock_listener = mock.MagicMock()
+        mock_listener.id = '12345'
+
+        # Test no listeners
+        mock_lb.listeners = None
+        listeners_reload_obj.execute(mock_lb, None, 0)
+        mock_driver.reload.assert_not_called()
+
+        # Test with listeners
+        mock_driver.start.reset_mock()
+        mock_lb.listeners = [mock_listener]
+        listeners_reload_obj.execute(mock_lb, [amphora_mock], 0,
+                                     timeout_dict=self.timeout_dict)
+        mock_driver.reload.assert_called_once_with(mock_lb, amphora_mock,
+                                                   self.timeout_dict)
+        # Test revert
+        mock_lb.listeners = [mock_listener]
+        listeners_reload_obj.revert(mock_lb)
+        mock_prov_status_error.assert_called_once_with('12345')
+
     @mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
                 'mark_listener_prov_status_error')
     def test_listeners_start(self,
@@ -296,6 +346,12 @@ class TestAmphoraDriverTasks(base.TestCase):
             status=constants.ERROR)
         self.assertIsNone(amp)

+        # Test revert when this task failed
+        repo.AmphoraRepository.update.reset_mock()
+        amp = amphora_finalize_obj.revert(
+            failure.Failure.from_exception(Exception('boom')), _amphora_mock)
+        repo.AmphoraRepository.update.assert_not_called()
+
     def test_amphora_post_network_plug(self,
                                        mock_driver,
                                        mock_generate_uuid,
@@ -332,7 +388,14 @@ class TestAmphoraDriverTasks(base.TestCase):

         self.assertIsNone(amp)

-    def test_amphorae_post_network_plug(self, mock_driver,
+        # Test revert when this task failed
+        repo.AmphoraRepository.update.reset_mock()
+        amp = amphora_post_network_plug_obj.revert(
+            failure.Failure.from_exception(Exception('boom')), _amphora_mock)
+        repo.AmphoraRepository.update.assert_not_called()
+
+    @mock.patch('octavia.db.repositories.AmphoraRepository.get_all')
+    def test_amphorae_post_network_plug(self, mock_amp_get_all, mock_driver,
                                         mock_generate_uuid,
                                         mock_log,
                                         mock_get_session,
@@ -342,7 +405,7 @@ class TestAmphoraDriverTasks(base.TestCase):
         mock_driver.get_network.return_value = _network_mock
         _amphora_mock.id = AMP_ID
         _amphora_mock.compute_id = COMPUTE_ID
-        _LB_mock.amphorae = [_amphora_mock]
+        mock_amp_get_all.return_value = [[_amphora_mock], None]
         amphora_post_network_plug_obj = (amphora_driver_tasks.
                                          AmphoraePostNetworkPlug())

@@ -354,6 +417,14 @@ class TestAmphoraDriverTasks(base.TestCase):
         (mock_driver.post_network_plug.
             assert_called_once_with(_amphora_mock, port_mock))

+        # Test with no ports to plug
+        mock_driver.post_network_plug.reset_mock()
+
+        _deltas_mock = {'0': [port_mock]}
+
+        amphora_post_network_plug_obj.execute(_LB_mock, _deltas_mock)
+        mock_driver.post_network_plug.assert_not_called()
+
         # Test revert
         amp = amphora_post_network_plug_obj.revert(None, _LB_mock,
                                                    _deltas_mock)
@@ -376,6 +447,13 @@ class TestAmphoraDriverTasks(base.TestCase):

         self.assertIsNone(amp)

+        # Test revert when this task failed
+        repo.AmphoraRepository.update.reset_mock()
+        amp = amphora_post_network_plug_obj.revert(
+            failure.Failure.from_exception(Exception('boom')), _amphora_mock,
+            None)
+        repo.AmphoraRepository.update.assert_not_called()
+
     @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
     def test_amphora_post_vip_plug(self,
                                    mock_loadbalancer_repo_update,
@@ -426,6 +504,13 @@ class TestAmphoraDriverTasks(base.TestCase):

         self.assertIsNone(amp)

+        # Test revert when this task failed
+        repo.AmphoraRepository.update.reset_mock()
+        amp = amphora_post_vip_plug_obj.revert(
+            failure.Failure.from_exception(Exception('boom')), _amphora_mock,
+            None)
+        repo.AmphoraRepository.update.assert_not_called()
+
     @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
     def test_amphorae_post_vip_plug(self,
                                     mock_loadbalancer_repo_update,
@@ -465,6 +550,13 @@ class TestAmphoraDriverTasks(base.TestCase):

         self.assertIsNone(amp)

+        # Test revert when this task failed
+        repo.AmphoraRepository.update.reset_mock()
+        amp = amphora_post_vip_plug_obj.revert(
+            failure.Failure.from_exception(Exception('boom')), _amphora_mock,
+            None)
+        repo.AmphoraRepository.update.assert_not_called()
+
     def test_amphora_cert_upload(self,
                                  mock_driver,
                                  mock_generate_uuid,
@@ -491,45 +583,59 @@ class TestAmphoraDriverTasks(base.TestCase):
                                            mock_listener_repo_get,
                                            mock_listener_repo_update,
                                            mock_amphora_repo_update):
+        FAKE_INTERFACE = 'fake0'
         _LB_mock.amphorae = _amphorae_mock
+        mock_driver.get_interface_from_ip.side_effect = [FAKE_INTERFACE,
+                                                         Exception('boom')]

         timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES,
                         constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL}

         amphora_update_vrrp_interface_obj = (
             amphora_driver_tasks.AmphoraUpdateVRRPInterface())
-        amphora_update_vrrp_interface_obj.execute(_LB_mock)
-        mock_driver.get_vrrp_interface.assert_called_once_with(
-            _amphora_mock, timeout_dict=timeout_dict)
+        amphora_update_vrrp_interface_obj.execute(_amphora_mock, timeout_dict)
+        mock_driver.get_interface_from_ip.assert_called_once_with(
+            _amphora_mock, _amphora_mock.vrrp_ip, timeout_dict=timeout_dict)
+        mock_amphora_repo_update.assert_called_once_with(
+            _session_mock, _amphora_mock.id, vrrp_interface=FAKE_INTERFACE)

-        # Test revert
-        mock_driver.reset_mock()
-
-        _LB_mock.amphorae = _amphorae_mock
-        amphora_update_vrrp_interface_obj.revert("BADRESULT", _LB_mock)
-        mock_amphora_repo_update.assert_called_with(_session_mock,
-                                                    _amphora_mock.id,
-                                                    vrrp_interface=None)
-
-        mock_driver.reset_mock()
+        # Test with an exception
         mock_amphora_repo_update.reset_mock()
+        amphora_update_vrrp_interface_obj.execute(_amphora_mock, timeout_dict)
+        mock_amphora_repo_update.assert_called_once_with(
+            _session_mock, _amphora_mock.id, status=constants.ERROR)

-        failure_obj = failure.Failure.from_exception(Exception("TESTEXCEPT"))
-        amphora_update_vrrp_interface_obj.revert(failure_obj, _LB_mock)
-        self.assertFalse(mock_amphora_repo_update.called)
-
-        # Test revert with exception
-        mock_driver.reset_mock()
-        mock_amphora_repo_update.reset_mock()
-        mock_amphora_repo_update.side_effect = Exception('fail')
-
-        _LB_mock.amphorae = _amphorae_mock
-        amphora_update_vrrp_interface_obj.revert("BADRESULT", _LB_mock)
-        mock_amphora_repo_update.assert_called_with(_session_mock,
-                                                    _amphora_mock.id,
-                                                    vrrp_interface=None)
+    def test_amphora_index_update_vrrp_interface(
+            self, mock_driver, mock_generate_uuid, mock_log, mock_get_session,
+            mock_listener_repo_get, mock_listener_repo_update,
+            mock_amphora_repo_update):
+        FAKE_INTERFACE = 'fake0'
+
+        _LB_mock.amphorae = _amphorae_mock
+        mock_driver.get_interface_from_ip.side_effect = [FAKE_INTERFACE,
+                                                         Exception('boom')]
+
+        timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES,
+                        constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL}
+
+        amphora_update_vrrp_interface_obj = (
+            amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface())
+        amphora_update_vrrp_interface_obj.execute(
+            [_amphora_mock], 0, timeout_dict)
+        mock_driver.get_interface_from_ip.assert_called_once_with(
+            _amphora_mock, _amphora_mock.vrrp_ip, timeout_dict=timeout_dict)
+        mock_amphora_repo_update.assert_called_once_with(
+            _session_mock, _amphora_mock.id, vrrp_interface=FAKE_INTERFACE)
+
+        # Test with an exception
+        mock_amphora_repo_update.reset_mock()
+        amphora_update_vrrp_interface_obj.execute(
+            [_amphora_mock], 0, timeout_dict)
+        mock_amphora_repo_update.assert_called_once_with(
+            _session_mock, _amphora_mock.id, status=constants.ERROR)

+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
     def test_amphora_vrrp_update(self,
+                                 mock_lb_get,
                                  mock_driver,
                                  mock_generate_uuid,
                                  mock_log,
@@ -538,11 +644,53 @@ class TestAmphoraDriverTasks(base.TestCase):
                                  mock_listener_repo_update,
                                  mock_amphora_repo_update):
         amphorae_network_config = mock.MagicMock()
+        mock_driver.update_vrrp_conf.side_effect = [mock.DEFAULT,
+                                                    Exception('boom')]
+        mock_lb_get.return_value = _LB_mock
         amphora_vrrp_update_obj = (
             amphora_driver_tasks.AmphoraVRRPUpdate())
-        amphora_vrrp_update_obj.execute(_LB_mock, amphorae_network_config)
+        amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config,
+                                        _amphora_mock, 'fakeint0')
         mock_driver.update_vrrp_conf.assert_called_once_with(
-            _LB_mock, amphorae_network_config)
+            _LB_mock, amphorae_network_config, _amphora_mock, None)
+
+        # Test with an exception
+        mock_amphora_repo_update.reset_mock()
+        amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config,
+                                        _amphora_mock, 'fakeint0')
+        mock_amphora_repo_update.assert_called_once_with(
+            _session_mock, _amphora_mock.id, status=constants.ERROR)
+
+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
+    def test_amphora_index_vrrp_update(self,
+                                       mock_lb_get,
+                                       mock_driver,
+                                       mock_generate_uuid,
+                                       mock_log,
+                                       mock_get_session,
+                                       mock_listener_repo_get,
+                                       mock_listener_repo_update,
+                                       mock_amphora_repo_update):
+        amphorae_network_config = mock.MagicMock()
+        mock_driver.update_vrrp_conf.side_effect = [mock.DEFAULT,
+                                                    Exception('boom')]
+        mock_lb_get.return_value = _LB_mock
+        amphora_vrrp_update_obj = (
+            amphora_driver_tasks.AmphoraIndexVRRPUpdate())
+
+        amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config,
+                                        0, [_amphora_mock], 'fakeint0',
+                                        timeout_dict=self.timeout_dict)
+        mock_driver.update_vrrp_conf.assert_called_once_with(
+            _LB_mock, amphorae_network_config, _amphora_mock,
+            self.timeout_dict)
+
+        # Test with an exception
+        mock_amphora_repo_update.reset_mock()
+        amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config,
+                                        0, [_amphora_mock], 'fakeint0')
+        mock_amphora_repo_update.assert_called_once_with(
+            _session_mock, _amphora_mock.id, status=constants.ERROR)

     def test_amphora_vrrp_stop(self,
                                mock_driver,
@@ -567,8 +715,25 @@ class TestAmphoraDriverTasks(base.TestCase):
                                 mock_amphora_repo_update):
         amphora_vrrp_start_obj = (
             amphora_driver_tasks.AmphoraVRRPStart())
-        amphora_vrrp_start_obj.execute(_LB_mock)
-        mock_driver.start_vrrp_service.assert_called_once_with(_LB_mock)
+        amphora_vrrp_start_obj.execute(_amphora_mock,
+                                       timeout_dict=self.timeout_dict)
+        mock_driver.start_vrrp_service.assert_called_once_with(
+            _amphora_mock, self.timeout_dict)
+
+    def test_amphora_index_vrrp_start(self,
+                                      mock_driver,
+                                      mock_generate_uuid,
+                                      mock_log,
+                                      mock_get_session,
+                                      mock_listener_repo_get,
+                                      mock_listener_repo_update,
+                                      mock_amphora_repo_update):
+        amphora_vrrp_start_obj = (
+            amphora_driver_tasks.AmphoraIndexVRRPStart())
+        amphora_vrrp_start_obj.execute(0, [_amphora_mock],
+                                       timeout_dict=self.timeout_dict)
+        mock_driver.start_vrrp_service.assert_called_once_with(
+            _amphora_mock, self.timeout_dict)

     def test_amphora_compute_connectivity_wait(self,
                                                mock_driver,
@@ -18,6 +18,7 @@ from cryptography import fernet
 from oslo_config import cfg
 from oslo_config import fixture as oslo_fixture
 from oslo_utils import uuidutils
+import tenacity

 from octavia.common import constants
 from octavia.common import exceptions
@@ -170,7 +171,8 @@ class TestComputeTasks(base.TestCase):

         mock_driver.build.return_value = COMPUTE_ID
         # Test execute()
-        compute_id = createcompute.execute(_amphora_mock.id, ports=[_port])
+        compute_id = createcompute.execute(_amphora_mock.id, ports=[_port],
+                                           server_group_id=None)

         # Validate that the build method was called properly
         mock_driver.build.assert_called_once_with(
@@ -502,19 +504,54 @@ class TestComputeTasks(base.TestCase):
     @mock.patch('stevedore.driver.DriverManager.driver')
     def test_delete_amphorae_on_load_balancer(self, mock_driver):

+        mock_driver.delete.side_effect = [mock.DEFAULT,
+                                          exceptions.OctaviaException('boom')]
+
         delete_amps = compute_tasks.DeleteAmphoraeOnLoadBalancer()

         delete_amps.execute(_load_balancer_mock)

         mock_driver.delete.assert_called_once_with(COMPUTE_ID)

+        # Test compute driver exception is raised
+        self.assertRaises(exceptions.OctaviaException, delete_amps.execute,
+                          _load_balancer_mock)
+
     @mock.patch('stevedore.driver.DriverManager.driver')
     def test_compute_delete(self, mock_driver):
+        mock_driver.delete.side_effect = [
+            mock.DEFAULT, exceptions.OctaviaException('boom'),
+            mock.DEFAULT, exceptions.OctaviaException('boom'),
+            exceptions.OctaviaException('boom'),
+            exceptions.OctaviaException('boom'),
+            exceptions.OctaviaException('boom')]
+
         delete_compute = compute_tasks.ComputeDelete()
+
+        # Limit the retry attempts for the test run to save time
+        delete_compute.execute.retry.stop = tenacity.stop_after_attempt(2)
+
         delete_compute.execute(_amphora_mock)

         mock_driver.delete.assert_called_once_with(COMPUTE_ID)
+
+        # Test retry after a compute exception
+        mock_driver.reset_mock()
+        delete_compute.execute(_amphora_mock)
+        mock_driver.delete.assert_has_calls([mock.call(COMPUTE_ID),
+                                             mock.call(COMPUTE_ID)])
+
+        # Test passive failure
+        mock_driver.reset_mock()
+        delete_compute.execute(_amphora_mock, passive_failure=True)
+        mock_driver.delete.assert_has_calls([mock.call(COMPUTE_ID),
+                                             mock.call(COMPUTE_ID)])
+
+        # Test non-passive failure
+        mock_driver.reset_mock()
+        self.assertRaises(exceptions.OctaviaException, delete_compute.execute,
+                          _amphora_mock, passive_failure=False)

     @mock.patch('stevedore.driver.DriverManager.driver')
     def test_nova_server_group_create(self, mock_driver):
         nova_sever_group_obj = compute_tasks.NovaServerGroupCreate()
@@ -560,3 +597,34 @@ class TestComputeTasks(base.TestCase):
         sg_id = None
         nova_sever_group_obj.execute(sg_id)
         self.assertFalse(mock_driver.delete_server_group.called, sg_id)
+
+    @mock.patch('stevedore.driver.DriverManager.driver')
+    def test_attach_port(self, mock_driver):
+        COMPUTE_ID = uuidutils.generate_uuid()
+        PORT_ID = uuidutils.generate_uuid()
+        amphora_mock = mock.MagicMock()
+        port_mock = mock.MagicMock()
+        amphora_mock.compute_id = COMPUTE_ID
+        port_mock.id = PORT_ID
+
+        attach_port_obj = compute_tasks.AttachPort()
+
+        # Test execute
+        attach_port_obj.execute(amphora_mock, port_mock)
+
+        mock_driver.attach_network_or_port.assert_called_once_with(
+            COMPUTE_ID, port_id=PORT_ID)
+
+        # Test revert
+        mock_driver.reset_mock()
+
+        attach_port_obj.revert(amphora_mock, port_mock)
+
+        mock_driver.detach_port.assert_called_once_with(COMPUTE_ID, PORT_ID)
+
+        # Test revert exception
+        mock_driver.reset_mock()
+        mock_driver.detach_port.side_effect = [Exception('boom')]
+
+        # should not raise
+        attach_port_obj.revert(amphora_mock, port_mock)
@@ -48,7 +48,7 @@ VIP_IP = '192.0.5.2'
 VRRP_IP = '192.0.5.3'
 HA_IP = '192.0.5.4'
 AMP_ROLE = 'FAKE_ROLE'
-VRRP_ID = random.randrange(255)
+VRRP_ID = 1
 VRRP_PRIORITY = random.randrange(100)
 CACHED_ZONE = 'zone1'
 IMAGE_ID = uuidutils.generate_uuid()
@@ -489,9 +489,17 @@ class TestDatabaseTasks(base.TestCase):
             mock_listener_repo_update,
             mock_amphora_repo_update,
             mock_amphora_repo_delete):
+        mock_base_port = mock.MagicMock()
+        mock_base_port.id = VRRP_PORT_ID
+        mock_fixed_ip = mock.MagicMock()
+        mock_fixed_ip.ip_address = VRRP_IP
+        mock_base_port.fixed_ips = [mock_fixed_ip]
+        mock_vip = mock.MagicMock()
+        mock_vip.ip_address = HA_IP
+        mock_vip.port_id = HA_PORT_ID

         update_amp_fo_details = database_tasks.UpdateAmpFailoverDetails()
-        update_amp_fo_details.execute(_amphora_mock, _amphora_mock)
+        update_amp_fo_details.execute(_amphora_mock, mock_vip, mock_base_port)

         mock_amphora_repo_update.assert_called_once_with(
             'TEST',
@@ -1770,9 +1778,11 @@ class TestDatabaseTasks(base.TestCase):
         repo.AmphoraRepository.update.assert_called_once_with(
             'TEST', AMP_ID, role=None, vrrp_priority=None)

+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
     @mock.patch('octavia.db.repositories.AmphoraRepository.get')
     def test_get_amphorae_from_loadbalancer(self,
                                             mock_amphora_get,
+                                            mock_lb_get,
                                             mock_generate_uuid,
                                             mock_LOG,
                                             mock_get_session,
@@ -1786,6 +1796,7 @@ class TestDatabaseTasks(base.TestCase):
         amp2.id = uuidutils.generate_uuid()
         lb = mock.MagicMock()
         lb.amphorae = [amp1, amp2]
+        mock_lb_get.return_value = lb

         mock_amphora_get.side_effect = [_amphora_mock, None]

@@ -1810,6 +1821,23 @@ class TestDatabaseTasks(base.TestCase):
         mock_listener_get.assert_called_once_with('TEST', id=_listener_mock.id)
         self.assertEqual([_listener_mock], result)

+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
+    def test_get_loadbalancer(self, mock_lb_get, mock_generate_uuid, mock_LOG,
+                              mock_get_session, mock_loadbalancer_repo_update,
+                              mock_listener_repo_update,
+                              mock_amphora_repo_update,
+                              mock_amphora_repo_delete):
+        FAKE_LB = 'fake LB'
+        LB_ID = uuidutils.generate_uuid()
+        get_loadbalancer_obj = database_tasks.GetLoadBalancer()
+
+        mock_lb_get.return_value = FAKE_LB
+
+        result = get_loadbalancer_obj.execute(LB_ID)
+
+        self.assertEqual(FAKE_LB, result)
+        mock_lb_get.assert_called_once_with('TEST', id=LB_ID)
+
     def test_get_vip_from_loadbalancer(self,
                                        mock_generate_uuid,
                                        mock_LOG,
@@ -1837,7 +1865,7 @@ class TestDatabaseTasks(base.TestCase):
         mock_get_session.side_effect = ['TEST',
                                         odb_exceptions.DBDuplicateEntry]
         create_vrrp_group = database_tasks.CreateVRRPGroupForLB()
-        create_vrrp_group.execute(_loadbalancer_mock)
+        create_vrrp_group.execute(_loadbalancer_mock.id)
         mock_vrrp_group_create.assert_called_once_with(
             'TEST', load_balancer_id=LB_ID,
             vrrp_group_name=LB_ID.replace('-', ''),
@@ -11,16 +11,17 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-#
 from unittest import mock

 from oslo_config import cfg
 from oslo_config import fixture as oslo_fixture
 from oslo_utils import uuidutils
 from taskflow.types import failure
+import tenacity

 from octavia.common import constants
 from octavia.common import data_models as o_data_models
+from octavia.common import exceptions
 from octavia.controller.worker.v1.tasks import network_tasks
 from octavia.network import base as net_base
 from octavia.network import data_models
@@ -79,11 +80,75 @@ class TestNetworkTasks(base.TestCase):
         self.amphora_mock.id = AMPHORA_ID
         self.amphora_mock.compute_id = COMPUTE_ID
         self.amphora_mock.status = constants.AMPHORA_ALLOCATED
-        conf = oslo_fixture.Config(cfg.CONF)
-        conf.config(group="controller_worker", amp_boot_network_list=['netid'])
+        self.boot_net_id = NETWORK_ID
+        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        conf.config(group="controller_worker",
+                    amp_boot_network_list=[self.boot_net_id])
+        conf.config(group="networking", max_retries=1)
         super(TestNetworkTasks, self).setUp()

+    def test_calculate_amphora_delta(self, mock_get_net_driver):
+        DELETE_NETWORK_ID = uuidutils.generate_uuid()
+        MEMBER_NETWORK_ID = uuidutils.generate_uuid()
+        MEMBER_SUBNET_ID = uuidutils.generate_uuid()
+        VRRP_PORT_ID = uuidutils.generate_uuid()
+        mock_driver = mock.MagicMock()
+        mock_get_net_driver.return_value = mock_driver
+        member_mock = mock.MagicMock()
+        member_mock.subnet_id = MEMBER_SUBNET_ID
+        pool_mock = mock.MagicMock()
+        pool_mock.members = [member_mock]
+        lb_mock = mock.MagicMock()
+        lb_mock.pools = [pool_mock]
+        amphora_mock = mock.MagicMock()
+        amphora_mock.id = AMPHORA_ID
+        amphora_mock.compute_id = COMPUTE_ID
+        amphora_mock.vrrp_port_id = VRRP_PORT_ID
+        vrrp_port_mock = mock.MagicMock()
+        vrrp_port_mock.network_id = self.boot_net_id
+        mock_subnet = mock.MagicMock()
+        mock_subnet.network_id = MEMBER_NETWORK_ID
+        nic1_delete_mock = mock.MagicMock()
+        nic1_delete_mock.network_id = DELETE_NETWORK_ID
+        nic2_keep_mock = mock.MagicMock()
+        nic2_keep_mock.network_id = self.boot_net_id
+
+        mock_driver.get_port.return_value = vrrp_port_mock
+        mock_driver.get_subnet.return_value = mock_subnet
+        mock_driver.get_plugged_networks.return_value = [nic1_delete_mock,
+                                                         nic2_keep_mock]
+
+        calc_amp_delta = network_tasks.CalculateAmphoraDelta()
+
+        # Test vrrp_port_id is None
+        result = calc_amp_delta.execute(lb_mock, amphora_mock, {})
+
+        self.assertEqual(AMPHORA_ID, result.amphora_id)
+        self.assertEqual(COMPUTE_ID, result.compute_id)
+        self.assertEqual(1, len(result.add_nics))
+        self.assertEqual(MEMBER_NETWORK_ID, result.add_nics[0].network_id)
+        self.assertEqual(1, len(result.delete_nics))
+        self.assertEqual(DELETE_NETWORK_ID, result.delete_nics[0].network_id)
+        mock_driver.get_port.assert_called_once_with(VRRP_PORT_ID)
+        mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID)
+        mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
+
+        # Test with vrrp_port_id
+        mock_driver.reset_mock()
+
+        result = calc_amp_delta.execute(lb_mock, amphora_mock, {},
+                                        vrrp_port=vrrp_port_mock)
+
+        self.assertEqual(AMPHORA_ID, result.amphora_id)
+        self.assertEqual(COMPUTE_ID, result.compute_id)
+        self.assertEqual(1, len(result.add_nics))
+        self.assertEqual(MEMBER_NETWORK_ID, result.add_nics[0].network_id)
+        self.assertEqual(1, len(result.delete_nics))
+        self.assertEqual(DELETE_NETWORK_ID, result.delete_nics[0].network_id)
+        mock_driver.get_port.assert_not_called()
+        mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID)
+        mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
+
     def test_calculate_delta(self, mock_get_net_driver):
         mock_driver = mock.MagicMock()
         mock_get_net_driver.return_value = mock_driver
@@ -678,12 +743,39 @@ class TestNetworkTasks(base.TestCase):
         net_task.execute(lb)
         mock_driver.update_vip.assert_called_once_with(lb, for_delete=True)

-    def test_get_amphorae_network_configs(self, mock_get_net_driver):
+    @mock.patch('octavia.db.api.get_session', return_value='TEST')
+    @mock.patch('octavia.db.repositories.AmphoraRepository.get')
+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
+    def test_get_amphora_network_configs_by_id(
+            self, mock_lb_get, mock_amp_get,
+            mock_get_session, mock_get_net_driver):
+        LB_ID = uuidutils.generate_uuid()
+        AMP_ID = uuidutils.generate_uuid()
+        mock_driver = mock.MagicMock()
+        mock_get_net_driver.return_value = mock_driver
+        mock_amp_get.return_value = 'mock amphora'
+        mock_lb_get.return_value = 'mock load balancer'
+
+        net_task = network_tasks.GetAmphoraNetworkConfigsByID()
+
+        net_task.execute(LB_ID, AMP_ID)
+
+        mock_driver.get_network_configs.assert_called_once_with(
+            'mock load balancer', amphora='mock amphora')
+        mock_amp_get.assert_called_once_with('TEST', id=AMP_ID)
+        mock_lb_get.assert_called_once_with('TEST', id=LB_ID)
+
+    @mock.patch('octavia.db.api.get_session', return_value='TEST')
+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
+    def test_get_amphorae_network_configs(self, mock_lb_get, mock_get_session,
+                                          mock_get_net_driver):
         mock_driver = mock.MagicMock()
         mock_get_net_driver.return_value = mock_driver
         lb = o_data_models.LoadBalancer()
+        mock_lb_get.return_value = lb
         net_task = network_tasks.GetAmphoraeNetworkConfigs()
-        net_task.execute(lb)
+        net_task.execute(lb.id)
+        mock_lb_get.assert_called_once_with('TEST', id=lb.id)
         mock_driver.get_network_configs.assert_called_once_with(lb)

     def test_failover_preparation_for_amphora(self, mock_get_net_driver):
@@ -755,41 +847,20 @@ class TestNetworkTasks(base.TestCase):
         mock_driver.plug_port.assert_any_call(amphora, port1)
         mock_driver.plug_port.assert_any_call(amphora, port2)

-    def test_plug_vip_port(self, mock_get_net_driver):
-        mock_driver = mock.MagicMock()
-        mock_get_net_driver.return_value = mock_driver
-        vrrp_port = mock.MagicMock()
-
-        amphorae_network_config = mock.MagicMock()
-        amphorae_network_config.get().vrrp_port = vrrp_port
-
-        plugvipport = network_tasks.PlugVIPPort()
-        plugvipport.execute(self.amphora_mock, amphorae_network_config)
-        mock_driver.plug_port.assert_any_call(self.amphora_mock, vrrp_port)
-
-        # test revert
-        plugvipport.revert(None, self.amphora_mock, amphorae_network_config)
-        mock_driver.unplug_port.assert_any_call(self.amphora_mock, vrrp_port)
-
-    def test_wait_for_port_detach(self, mock_get_net_driver):
-        mock_driver = mock.MagicMock()
-        mock_get_net_driver.return_value = mock_driver
-
-        amphora = o_data_models.Amphora(id=AMPHORA_ID,
-                                        lb_network_ip=IP_ADDRESS)
-
-        waitforportdetach = network_tasks.WaitForPortDetach()
-        waitforportdetach.execute(amphora)
-
-        mock_driver.wait_for_port_detach.assert_called_once_with(amphora)
-
-    def test_update_vip_sg(self, mock_get_net_driver):
+    @mock.patch('octavia.db.api.get_session', return_value='TEST')
+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
+    def test_update_vip_sg(self, mock_lb_get, mock_get_session,
+                           mock_get_net_driver):
         mock_driver = mock.MagicMock()
         mock_get_net_driver.return_value = mock_driver
+        mock_lb_get.return_value = self.load_balancer_mock
         net = network_tasks.UpdateVIPSecurityGroup()

-        net.execute(LB)
-        mock_driver.update_vip_sg.assert_called_once_with(LB, LB.vip)
+        net.execute(self.load_balancer_mock.id)
+        mock_lb_get.assert_called_once_with('TEST',
+                                            id=self.load_balancer_mock.id)
+        mock_driver.update_vip_sg.assert_called_once_with(
+            self.load_balancer_mock, self.load_balancer_mock.vip)

     def test_get_subnet_from_vip(self, mock_get_net_driver):
         mock_driver = mock.MagicMock()
@@ -816,3 +887,274 @@ class TestNetworkTasks(base.TestCase):
         net.revert(AMPS_DATA[0], LB, self.amphora_mock, mockSubnet)
         mock_driver.unplug_aap_port.assert_called_once_with(
             LB.vip, self.amphora_mock, mockSubnet)
+
+    @mock.patch('octavia.controller.worker.v1.tasks.network_tasks.DeletePort.'
+                'update_progress')
+    def test_delete_port(self, mock_update_progress, mock_get_net_driver):
+        PORT_ID = uuidutils.generate_uuid()
+        mock_driver = mock.MagicMock()
+        mock_get_net_driver.return_value = mock_driver
+        mock_driver.delete_port.side_effect = [
+            mock.DEFAULT, exceptions.OctaviaException('boom'), mock.DEFAULT,
+            exceptions.OctaviaException('boom'),
+            exceptions.OctaviaException('boom'),
+            exceptions.OctaviaException('boom'),
+            exceptions.OctaviaException('boom'),
+            exceptions.OctaviaException('boom'),
+            exceptions.OctaviaException('boom')]
+        mock_driver.admin_down_port.side_effect = [
+            mock.DEFAULT, exceptions.OctaviaException('boom')]
+
+        net_task = network_tasks.DeletePort()
+
+        # Limit the retry attempts for the test run to save time
+        net_task.execute.retry.stop = tenacity.stop_after_attempt(2)
+
+        # Test port ID is None (no-op)
+        net_task.execute(None)
+
+        mock_update_progress.assert_not_called()
+        mock_driver.delete_port.assert_not_called()
+
+        # Test successful delete
+        mock_update_progress.reset_mock()
+        mock_driver.reset_mock()
+
+        net_task.execute(PORT_ID)
+
+        mock_update_progress.assert_called_once_with(0.5)
+        mock_driver.delete_port.assert_called_once_with(PORT_ID)
+
+        # Test exception and successful retry
+        mock_update_progress.reset_mock()
+        mock_driver.reset_mock()
+
+        net_task.execute(PORT_ID)
+
+        mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
+        mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
+                                                  mock.call(PORT_ID)])
+
+        # Test passive failure
+        mock_update_progress.reset_mock()
+        mock_driver.reset_mock()
+
+        net_task.execute(PORT_ID, passive_failure=True)
+
+        mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
+        mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
+                                                  mock.call(PORT_ID)])
+        mock_driver.admin_down_port.assert_called_once_with(PORT_ID)
+
+        # Test passive failure admin down failure
+        mock_update_progress.reset_mock()
+        mock_driver.reset_mock()
+        mock_driver.admin_down_port.reset_mock()
+
+        net_task.execute(PORT_ID, passive_failure=True)
+
+        mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
+        mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
+                                                  mock.call(PORT_ID)])
+        mock_driver.admin_down_port.assert_called_once_with(PORT_ID)
+
+        # Test non-passive failure
+        mock_update_progress.reset_mock()
+        mock_driver.reset_mock()
+        mock_driver.admin_down_port.reset_mock()
+
+        mock_driver.admin_down_port.side_effect = [
+            exceptions.OctaviaException('boom')]
+
+        self.assertRaises(exceptions.OctaviaException, net_task.execute,
+                          PORT_ID)
+
+        mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
+        mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
+                                                  mock.call(PORT_ID)])
+        mock_driver.admin_down_port.assert_not_called()
+
+    def test_create_vip_base_port(self, mock_get_net_driver):
+        AMP_ID = uuidutils.generate_uuid()
+        PORT_ID = uuidutils.generate_uuid()
+        VIP_NETWORK_ID = uuidutils.generate_uuid()
+        VIP_QOS_ID = uuidutils.generate_uuid()
+        VIP_SG_ID = uuidutils.generate_uuid()
+        VIP_SUBNET_ID = uuidutils.generate_uuid()
+        VIP_IP_ADDRESS = '203.0.113.81'
+        mock_driver = mock.MagicMock()
+        mock_get_net_driver.return_value = mock_driver
+        vip_mock = mock.MagicMock()
+        vip_mock.ip_address = VIP_IP_ADDRESS
+        vip_mock.network_id = VIP_NETWORK_ID
+        vip_mock.qos_policy_id = VIP_QOS_ID
+        vip_mock.subnet_id = VIP_SUBNET_ID
+        port_mock = mock.MagicMock()
+        port_mock.id = PORT_ID
+
+        mock_driver.create_port.side_effect = [
+            port_mock, exceptions.OctaviaException('boom'),
+            exceptions.OctaviaException('boom'),
+            exceptions.OctaviaException('boom')]
+        mock_driver.delete_port.side_effect = [mock.DEFAULT, Exception('boom')]
+
+        net_task = network_tasks.CreateVIPBasePort()
+
+        # Limit the retry attempts for the test run to save time
+        net_task.execute.retry.stop = tenacity.stop_after_attempt(2)
+
+        # Test execute
+        result = net_task.execute(vip_mock, VIP_SG_ID, AMP_ID)
+
+        self.assertEqual(port_mock, result)
+        mock_driver.create_port.assert_called_once_with(
+            VIP_NETWORK_ID, name=constants.AMP_BASE_PORT_PREFIX + AMP_ID,
+            fixed_ips=[{constants.SUBNET_ID: VIP_SUBNET_ID}],
+            secondary_ips=[VIP_IP_ADDRESS], security_group_ids=[VIP_SG_ID],
+            qos_policy_id=VIP_QOS_ID)
+
+        # Test execute exception
+        mock_driver.reset_mock()
+
+        self.assertRaises(exceptions.OctaviaException, net_task.execute,
+                          vip_mock, None, AMP_ID)
+
+        # Test revert when this task failed
+        mock_driver.reset_mock()
+
+        net_task.revert(failure.Failure.from_exception(Exception('boom')),
+                        vip_mock, VIP_SG_ID, AMP_ID)
+
+        mock_driver.delete_port.assert_not_called()
+
+        # Test revert
+        mock_driver.reset_mock()
+
+        net_task.revert([port_mock], vip_mock, VIP_SG_ID, AMP_ID)
+
+        mock_driver.delete_port.assert_called_once_with(PORT_ID)
+
+        # Test revert exception
+        mock_driver.reset_mock()
+
+        net_task.revert([port_mock], vip_mock, VIP_SG_ID, AMP_ID)
+
+        mock_driver.delete_port.assert_called_once_with(PORT_ID)
+
+    @mock.patch('time.sleep')
+    def test_admin_down_port(self, mock_sleep, mock_get_net_driver):
+        PORT_ID = uuidutils.generate_uuid()
+        mock_driver = mock.MagicMock()
+        mock_get_net_driver.return_value = mock_driver
+        port_down_mock = mock.MagicMock()
+        port_down_mock.status = constants.DOWN
+        port_up_mock = mock.MagicMock()
+        port_up_mock.status = constants.UP
+        mock_driver.set_port_admin_state_up.side_effect = [
+            mock.DEFAULT, net_base.PortNotFound, mock.DEFAULT, mock.DEFAULT,
+            Exception('boom')]
+        mock_driver.get_port.side_effect = [port_down_mock, port_up_mock]
+
+        net_task = network_tasks.AdminDownPort()
+
+        # Test execute
+        net_task.execute(PORT_ID)
+
+        mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
+                                                                    False)
+        mock_driver.get_port.assert_called_once_with(PORT_ID)
+
+        # Test passive fail on port not found
+        mock_driver.reset_mock()
+
+        net_task.execute(PORT_ID)
+
+        mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
+                                                                    False)
+        mock_driver.get_port.assert_not_called()
+
+        # Test passive fail on port stays up
+        mock_driver.reset_mock()
+
+        net_task.execute(PORT_ID)
+
+        mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
+                                                                    False)
+        mock_driver.get_port.assert_called_once_with(PORT_ID)
+
+        # Test revert when this task failed
+        mock_driver.reset_mock()
+
+        net_task.revert(failure.Failure.from_exception(Exception('boom')),
+                        PORT_ID)
+
+        mock_driver.set_port_admin_state_up.assert_not_called()
+
+        # Test revert
+        mock_driver.reset_mock()
+
+        net_task.revert(None, PORT_ID)
+
+        mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
+                                                                    True)
+
+        # Test revert exception passive failure
+        mock_driver.reset_mock()
+
+        net_task.revert(None, PORT_ID)
+
+        mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
+                                                                    True)
+
+    @mock.patch('octavia.common.utils.get_vip_security_group_name')
+    def test_get_vip_security_group_id(self, mock_get_sg_name,
+                                       mock_get_net_driver):
+        LB_ID = uuidutils.generate_uuid()
+        SG_ID = uuidutils.generate_uuid()
+        SG_NAME = 'fake_SG_name'
+        mock_driver = mock.MagicMock()
+        mock_get_net_driver.return_value = mock_driver
+        mock_get_sg_name.return_value = SG_NAME
+        sg_mock = mock.MagicMock()
+        sg_mock.id = SG_ID
+        mock_driver.get_security_group.side_effect = [
+            sg_mock, None, net_base.SecurityGroupNotFound,
+            net_base.SecurityGroupNotFound]
+
+        net_task = network_tasks.GetVIPSecurityGroupID()
+
+        # Test execute
+        result = net_task.execute(LB_ID)
+
+        mock_driver.get_security_group.assert_called_once_with(SG_NAME)
+        mock_get_sg_name.assert_called_once_with(LB_ID)
+
+        # Test execute with empty get subnet response
+        mock_driver.reset_mock()
+        mock_get_sg_name.reset_mock()
+
+        result = net_task.execute(LB_ID)
+
+        self.assertIsNone(result)
+        mock_get_sg_name.assert_called_once_with(LB_ID)
+
+        # Test execute no security group found, security groups enabled
+        mock_driver.reset_mock()
+        mock_get_sg_name.reset_mock()
+        mock_driver.sec_grp_enabled = True
+
+        self.assertRaises(net_base.SecurityGroupNotFound, net_task.execute,
+                          LB_ID)
+        mock_driver.get_security_group.assert_called_once_with(SG_NAME)
+        mock_get_sg_name.assert_called_once_with(LB_ID)
+
+        # Test execute no security group found, security groups disabled
+        mock_driver.reset_mock()
+        mock_get_sg_name.reset_mock()
+        mock_driver.sec_grp_enabled = False
+
+        result = net_task.execute(LB_ID)
+
+        self.assertIsNone(result)
+        mock_driver.get_security_group.assert_called_once_with(SG_NAME)
+        mock_get_sg_name.assert_called_once_with(LB_ID)
@@ -0,0 +1,47 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from unittest import mock
+
+from taskflow import retry
+
+from octavia.controller.worker.v1.tasks import retry_tasks
+import octavia.tests.unit.base as base
+
+
+class TestRetryTasks(base.TestCase):
+
+    def setUp(self):
+        super(TestRetryTasks, self).setUp()
+
+    @mock.patch('time.sleep')
+    def test_sleeping_retry_times_controller(self, mock_sleep):
+        retry_ctrlr = retry_tasks.SleepingRetryTimesController(
+            attempts=2, name='test_retry')
+
+        # Test on_failure that should RETRY
+        history = ['boom']
+
+        result = retry_ctrlr.on_failure(history)
+
+        self.assertEqual(retry.RETRY, result)
+
+        # Test on_failure retries exhausted, should REVERT
+        history = ['boom', 'bang', 'pow']
+
+        result = retry_ctrlr.on_failure(history)
+
+        self.assertEqual(retry.REVERT, result)
+
+        # Test revert - should not raise
+        retry_ctrlr.revert(history)
@@ -21,6 +21,7 @@ from oslo_utils import uuidutils
 from octavia.common import base_taskflow
 from octavia.common import constants
 from octavia.common import data_models
+from octavia.common import exceptions
 from octavia.controller.worker.v1 import controller_worker
 import octavia.tests.unit.base as base

@@ -50,6 +51,9 @@ _vip_mock = mock.MagicMock()
 _listener_mock = mock.MagicMock()
 _load_balancer_mock = mock.MagicMock()
 _load_balancer_mock.listeners = [_listener_mock]
+_load_balancer_mock.topology = constants.TOPOLOGY_SINGLE
+_load_balancer_mock.flavor_id = None
+_load_balancer_mock.availability_zone = None
 _member_mock = mock.MagicMock()
 _pool_mock = mock.MagicMock()
 _l7policy_mock = mock.MagicMock()
@@ -144,6 +148,7 @@ class TestControllerWorker(base.TestCase):
                 store={constants.BUILD_TYPE_PRIORITY:
                        constants.LB_CREATE_SPARES_POOL_PRIORITY,
                        constants.FLAVOR: None,
+                       constants.SERVER_GROUP_ID: None,
                        constants.AVAILABILITY_ZONE: None}))

         _flow_mock.run.assert_called_once_with()
@@ -185,6 +190,7 @@ class TestControllerWorker(base.TestCase):
                 store={constants.BUILD_TYPE_PRIORITY:
                        constants.LB_CREATE_SPARES_POOL_PRIORITY,
                        constants.FLAVOR: None,
+                       constants.SERVER_GROUP_ID: None,
                        constants.AVAILABILITY_ZONE: az_data}))

         _flow_mock.run.assert_called_once_with()
@@ -193,38 +199,6 @@ class TestControllerWorker(base.TestCase):
 
         self.assertEqual(AMP_ID, amp)
 
-    @mock.patch('octavia.controller.worker.v1.flows.'
-                'amphora_flows.AmphoraFlows.get_delete_amphora_flow',
-                return_value='TEST')
-    def test_delete_amphora(self,
-                            mock_get_delete_amp_flow,
-                            mock_api_get_session,
-                            mock_dyn_log_listener,
-                            mock_taskflow_load,
-                            mock_pool_repo_get,
-                            mock_member_repo_get,
-                            mock_l7rule_repo_get,
-                            mock_l7policy_repo_get,
-                            mock_listener_repo_get,
-                            mock_lb_repo_get,
-                            mock_health_mon_repo_get,
-                            mock_amp_repo_get):
-
-        _flow_mock.reset_mock()
-
-        cw = controller_worker.ControllerWorker()
-        cw.delete_amphora(AMP_ID)
-
-        mock_amp_repo_get.assert_called_once_with(
-            _db_session,
-            id=AMP_ID)
-
-        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
-         assert_called_once_with('TEST',
-                                 store={constants.AMPHORA: _amphora_mock}))
-
-        _flow_mock.run.assert_called_once_with()
-
     @mock.patch('octavia.controller.worker.v1.flows.'
                 'health_monitor_flows.HealthMonitorFlows.'
                 'get_create_health_monitor_flow',
@@ -465,8 +439,8 @@ class TestControllerWorker(base.TestCase):
             constants.LOADBALANCER_ID: LB_ID,
             'update_dict': {'topology': constants.TOPOLOGY_SINGLE},
             constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY,
-            constants.FLAVOR: None,
-            constants.AVAILABILITY_ZONE: None,
+            constants.FLAVOR: None, constants.AVAILABILITY_ZONE: None,
+            constants.SERVER_GROUP_ID: None
         }
         lb_mock = mock.MagicMock()
         lb_mock.listeners = []
@@ -513,7 +487,7 @@ class TestControllerWorker(base.TestCase):
             constants.LOADBALANCER_ID: LB_ID,
             'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY},
             constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY,
-            constants.FLAVOR: None,
+            constants.FLAVOR: None, constants.SERVER_GROUP_ID: None,
             constants.AVAILABILITY_ZONE: None,
         }
         setattr(mock_lb_repo_get.return_value, 'topology',
@@ -561,7 +535,7 @@ class TestControllerWorker(base.TestCase):
             constants.LOADBALANCER_ID: LB_ID,
             'update_dict': {'topology': constants.TOPOLOGY_SINGLE},
             constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY,
-            constants.FLAVOR: None,
+            constants.FLAVOR: None, constants.SERVER_GROUP_ID: None,
             constants.AVAILABILITY_ZONE: None,
         }
 
@@ -615,7 +589,7 @@ class TestControllerWorker(base.TestCase):
             constants.LOADBALANCER_ID: LB_ID,
             'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY},
             constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY,
-            constants.FLAVOR: None,
+            constants.FLAVOR: None, constants.SERVER_GROUP_ID: None,
             constants.AVAILABILITY_ZONE: None,
         }
 
@@ -1213,27 +1187,27 @@ class TestControllerWorker(base.TestCase):
     @mock.patch('octavia.db.repositories.FlavorRepository.'
                 'get_flavor_metadata_dict', return_value={})
     @mock.patch('octavia.controller.worker.v1.flows.'
-                'amphora_flows.AmphoraFlows.get_failover_flow',
+                'amphora_flows.AmphoraFlows.get_failover_amphora_flow',
                 return_value=_flow_mock)
     @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
-    def test_failover_amphora(self,
+    def test_failover_amphora_lb_single(self,
                               mock_update,
                               mock_get_failover_flow,
                               mock_get_flavor_meta,
                               mock_get_az_meta,
                               mock_api_get_session,
                               mock_dyn_log_listener,
                               mock_taskflow_load,
                               mock_pool_repo_get,
                               mock_member_repo_get,
                               mock_l7rule_repo_get,
                               mock_l7policy_repo_get,
                               mock_listener_repo_get,
                               mock_lb_repo_get,
                               mock_health_mon_repo_get,
                               mock_amp_repo_get):
 
         _flow_mock.reset_mock()
+        mock_lb_repo_get.return_value = _load_balancer_mock
 
         cw = controller_worker.ControllerWorker()
         cw.failover_amphora(AMP_ID)
@@ -1241,23 +1215,247 @@ class TestControllerWorker(base.TestCase):
         (base_taskflow.BaseTaskFlowEngine._taskflow_load.
          assert_called_once_with(
              _flow_mock,
-             store={constants.FAILED_AMPHORA: _amphora_mock,
+             store={constants.FLAVOR: {'loadbalancer_topology':
+                                       _load_balancer_mock.topology},
+                    constants.LOADBALANCER: _load_balancer_mock,
                     constants.LOADBALANCER_ID:
-                        _amphora_mock.load_balancer_id,
+                        _load_balancer_mock.id,
                     constants.BUILD_TYPE_PRIORITY:
                         constants.LB_CREATE_FAILOVER_PRIORITY,
-                    constants.FLAVOR: {},
-                    constants.AVAILABILITY_ZONE: {}
+                    constants.SERVER_GROUP_ID:
+                        _load_balancer_mock.server_group_id,
+                    constants.AVAILABILITY_ZONE: {},
+                    constants.VIP: _load_balancer_mock.vip
                     }))
 
         _flow_mock.run.assert_called_once_with()
-        mock_update.assert_called_with(_db_session, LB_ID,
-                                       provisioning_status=constants.ACTIVE)
 
-    @mock.patch('octavia.controller.worker.v1.controller_worker.'
-                'ControllerWorker._perform_amphora_failover')
+    @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
+                'get_availability_zone_metadata_dict', return_value={})
+    @mock.patch('octavia.db.repositories.FlavorRepository.'
+                'get_flavor_metadata_dict', return_value={})
+    @mock.patch('octavia.controller.worker.v1.flows.'
+                'amphora_flows.AmphoraFlows.get_failover_amphora_flow',
+                return_value=_flow_mock)
+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
+    def test_failover_amphora_lb_act_stdby(self,
+                                           mock_update,
+                                           mock_get_failover_flow,
+                                           mock_get_flavor_meta,
+                                           mock_get_az_meta,
+                                           mock_api_get_session,
+                                           mock_dyn_log_listener,
+                                           mock_taskflow_load,
+                                           mock_pool_repo_get,
+                                           mock_member_repo_get,
+                                           mock_l7rule_repo_get,
+                                           mock_l7policy_repo_get,
+                                           mock_listener_repo_get,
+                                           mock_lb_repo_get,
+                                           mock_health_mon_repo_get,
+                                           mock_amp_repo_get):
+        _flow_mock.reset_mock()
+        load_balancer_mock = mock.MagicMock()
+        load_balancer_mock.listeners = [_listener_mock]
+        load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY
+        load_balancer_mock.flavor_id = None
+        load_balancer_mock.availability_zone = None
+        load_balancer_mock.vip = _vip_mock
+
+        mock_lb_repo_get.return_value = load_balancer_mock
+
+        cw = controller_worker.ControllerWorker()
+        cw.failover_amphora(AMP_ID)
+
+        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
+         assert_called_once_with(
+             _flow_mock,
+             store={constants.FLAVOR: {'loadbalancer_topology':
+                                       load_balancer_mock.topology},
+                    constants.LOADBALANCER: load_balancer_mock,
+                    constants.LOADBALANCER_ID: load_balancer_mock.id,
+                    constants.BUILD_TYPE_PRIORITY:
+                        constants.LB_CREATE_FAILOVER_PRIORITY,
+                    constants.AVAILABILITY_ZONE: {},
+                    constants.SERVER_GROUP_ID:
+                        load_balancer_mock.server_group_id,
+                    constants.VIP: load_balancer_mock.vip
+                    }))
+
+        _flow_mock.run.assert_called_once_with()
+
+    @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
+                'get_availability_zone_metadata_dict', return_value={})
+    @mock.patch('octavia.db.repositories.FlavorRepository.'
+                'get_flavor_metadata_dict', return_value={})
+    @mock.patch('octavia.controller.worker.v1.flows.'
+                'amphora_flows.AmphoraFlows.get_failover_amphora_flow',
+                return_value=_flow_mock)
+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
+    def test_failover_amphora_unknown_topology(self,
+                                               mock_update,
+                                               mock_get_failover_flow,
+                                               mock_get_flavor_meta,
+                                               mock_get_az_meta,
+                                               mock_api_get_session,
+                                               mock_dyn_log_listener,
+                                               mock_taskflow_load,
+                                               mock_pool_repo_get,
+                                               mock_member_repo_get,
+                                               mock_l7rule_repo_get,
+                                               mock_l7policy_repo_get,
+                                               mock_listener_repo_get,
+                                               mock_lb_repo_get,
+                                               mock_health_mon_repo_get,
+                                               mock_amp_repo_get):
+
+        _flow_mock.reset_mock()
+        load_balancer_mock = mock.MagicMock()
+        load_balancer_mock.listeners = [_listener_mock]
+        load_balancer_mock.topology = 'bogus'
+        load_balancer_mock.flavor_id = None
+        load_balancer_mock.availability_zone = None
+        load_balancer_mock.vip = _vip_mock
+
+        mock_lb_repo_get.return_value = load_balancer_mock
+
+        cw = controller_worker.ControllerWorker()
+        cw.failover_amphora(AMP_ID)
+
+        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
+         assert_called_once_with(
+             _flow_mock,
+             store={constants.FLAVOR: {'loadbalancer_topology':
+                                       load_balancer_mock.topology},
+                    constants.LOADBALANCER: load_balancer_mock,
+                    constants.LOADBALANCER_ID: load_balancer_mock.id,
+                    constants.BUILD_TYPE_PRIORITY:
+                        constants.LB_CREATE_FAILOVER_PRIORITY,
+                    constants.SERVER_GROUP_ID:
+                        load_balancer_mock.server_group_id,
+                    constants.AVAILABILITY_ZONE: {},
+                    constants.VIP: load_balancer_mock.vip
+                    }))
+
+        _flow_mock.run.assert_called_once_with()
+
+    @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
+                'get_availability_zone_metadata_dict', return_value={})
+    @mock.patch('octavia.db.repositories.FlavorRepository.'
+                'get_flavor_metadata_dict', return_value={})
+    @mock.patch('octavia.controller.worker.v1.flows.'
+                'amphora_flows.AmphoraFlows.get_failover_amphora_flow',
+                return_value=_flow_mock)
+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
+    def test_failover_amphora_with_flavor(self,
+                                          mock_update,
+                                          mock_get_failover_flow,
+                                          mock_get_flavor_meta,
+                                          mock_get_az_meta,
+                                          mock_api_get_session,
+                                          mock_dyn_log_listener,
+                                          mock_taskflow_load,
+                                          mock_pool_repo_get,
+                                          mock_member_repo_get,
+                                          mock_l7rule_repo_get,
+                                          mock_l7policy_repo_get,
+                                          mock_listener_repo_get,
+                                          mock_lb_repo_get,
+                                          mock_health_mon_repo_get,
+                                          mock_amp_repo_get):
+        _flow_mock.reset_mock()
+        load_balancer_mock = mock.MagicMock()
+        load_balancer_mock.listeners = [_listener_mock]
+        load_balancer_mock.topology = constants.TOPOLOGY_SINGLE
+        load_balancer_mock.flavor_id = uuidutils.generate_uuid()
+        load_balancer_mock.availability_zone = None
+        load_balancer_mock.vip = _vip_mock
+        mock_get_flavor_meta.return_value = {'taste': 'spicy'}
+
+        mock_lb_repo_get.return_value = load_balancer_mock
+
+        cw = controller_worker.ControllerWorker()
+        cw.failover_amphora(AMP_ID)
+
+        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
+         assert_called_once_with(
+             _flow_mock,
+             store={constants.FLAVOR: {'loadbalancer_topology':
+                                       load_balancer_mock.topology,
+                                       'taste': 'spicy'},
+                    constants.LOADBALANCER: load_balancer_mock,
+                    constants.LOADBALANCER_ID: load_balancer_mock.id,
+                    constants.BUILD_TYPE_PRIORITY:
+                        constants.LB_CREATE_FAILOVER_PRIORITY,
+                    constants.AVAILABILITY_ZONE: {},
+                    constants.SERVER_GROUP_ID:
+                        load_balancer_mock.server_group_id,
+                    constants.VIP: load_balancer_mock.vip
+                    }))
+
+        _flow_mock.run.assert_called_once_with()
+
+    @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
+                'get_availability_zone_metadata_dict', return_value={})
+    @mock.patch('octavia.db.repositories.FlavorRepository.'
+                'get_flavor_metadata_dict', return_value={})
+    @mock.patch('octavia.controller.worker.v1.flows.'
+                'amphora_flows.AmphoraFlows.get_failover_amphora_flow',
+                return_value=_flow_mock)
+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
+    def test_failover_amphora_with_az(self,
+                                      mock_update,
+                                      mock_get_failover_flow,
+                                      mock_get_flavor_meta,
+                                      mock_get_az_meta,
+                                      mock_api_get_session,
+                                      mock_dyn_log_listener,
+                                      mock_taskflow_load,
+                                      mock_pool_repo_get,
+                                      mock_member_repo_get,
+                                      mock_l7rule_repo_get,
+                                      mock_l7policy_repo_get,
+                                      mock_listener_repo_get,
+                                      mock_lb_repo_get,
+                                      mock_health_mon_repo_get,
+                                      mock_amp_repo_get):
+
+        _flow_mock.reset_mock()
+        load_balancer_mock = mock.MagicMock()
+        load_balancer_mock.listeners = [_listener_mock]
+        load_balancer_mock.topology = 'bogus'
+        load_balancer_mock.flavor_id = None
+        load_balancer_mock.availability_zone = uuidutils.generate_uuid()
+        load_balancer_mock.vip = _vip_mock
+        mock_get_az_meta.return_value = {'planet': 'jupiter'}
+
+        mock_lb_repo_get.return_value = load_balancer_mock
+
+        cw = controller_worker.ControllerWorker()
+        cw.failover_amphora(AMP_ID)
+
+        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
+         assert_called_once_with(
+             _flow_mock,
+             store={constants.FLAVOR: {'loadbalancer_topology':
+                                       load_balancer_mock.topology},
+                    constants.LOADBALANCER: load_balancer_mock,
+                    constants.LOADBALANCER_ID: load_balancer_mock.id,
+                    constants.BUILD_TYPE_PRIORITY:
+                        constants.LB_CREATE_FAILOVER_PRIORITY,
+                    constants.SERVER_GROUP_ID:
+                        load_balancer_mock.server_group_id,
+                    constants.AVAILABILITY_ZONE: {'planet': 'jupiter'},
+                    constants.VIP: load_balancer_mock.vip
+                    }))
+
+        _flow_mock.run.assert_called_once_with()
+
+    @mock.patch('octavia.controller.worker.v1.flows.amphora_flows.'
+                'AmphoraFlows.get_failover_amphora_flow')
     def test_failover_amp_missing_amp(self,
-                                      mock_perform_amp_failover,
+                                      mock_get_amp_failover,
                                       mock_api_get_session,
                                       mock_dyn_log_listener,
                                       mock_taskflow_load,
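The stores asserted across these failover tests all share one shape. A hedged sketch of how the worker presumably assembles it before loading the flow; the helper name is invented, while the key set mirrors the assertions above:

    from octavia.common import constants


    def build_amphora_failover_store(lb, flavor_meta=None, az_meta=None):
        # Mirrors the store asserted above: the flavor metadata is
        # augmented with the LB topology so the flow can rebuild the
        # correct layout (single vs. active/standby).
        flavor = dict(flavor_meta or {})
        flavor[constants.LOADBALANCER_TOPOLOGY] = lb.topology
        return {
            constants.FLAVOR: flavor,
            constants.LOADBALANCER: lb,
            constants.LOADBALANCER_ID: lb.id,
            constants.BUILD_TYPE_PRIORITY:
                constants.LB_CREATE_FAILOVER_PRIORITY,
            constants.SERVER_GROUP_ID: lb.server_group_id,
            constants.AVAILABILITY_ZONE: az_meta or {},
            constants.VIP: lb.vip,
        }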
@@ -1275,14 +1473,11 @@ class TestControllerWorker(base.TestCase):
         cw = controller_worker.ControllerWorker()
         cw.failover_amphora(AMP_ID)
 
-        mock_perform_amp_failover.assert_not_called()
+        mock_get_amp_failover.assert_not_called()
 
-    @mock.patch('octavia.controller.worker.v1.controller_worker.'
-                'ControllerWorker._perform_amphora_failover')
     @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
     def test_failover_amp_flow_exception(self,
                                          mock_update,
-                                         mock_perform_amp_failover,
                                          mock_api_get_session,
                                          mock_dyn_log_listener,
                                          mock_taskflow_load,
@@ -1295,18 +1490,21 @@ class TestControllerWorker(base.TestCase):
                                          mock_health_mon_repo_get,
                                          mock_amp_repo_get):
 
-        mock_perform_amp_failover.side_effect = TestException('boom')
+        mock_amphora = mock.MagicMock()
+        mock_amphora.id = AMP_ID
+        mock_amphora.load_balancer_id = LB_ID
+        mock_amp_repo_get.return_value = mock_amphora
+
+        mock_lb_repo_get.side_effect = TestException('boom')
         cw = controller_worker.ControllerWorker()
-        self.assertRaises(TestException, cw.failover_amphora, AMP_ID)
+        cw.failover_amphora(AMP_ID)
         mock_update.assert_called_with(_db_session, LB_ID,
                                        provisioning_status=constants.ERROR)
 
-    @mock.patch('octavia.controller.worker.v1.controller_worker.'
-                'ControllerWorker._perform_amphora_failover')
-    @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
+    @mock.patch('octavia.controller.worker.v1.flows.amphora_flows.'
+                'AmphoraFlows.get_failover_amphora_flow')
     def test_failover_amp_no_lb(self,
-                                mock_lb_update,
-                                mock_perform_amp_failover,
+                                mock_get_failover_amp_flow,
                                 mock_api_get_session,
                                 mock_dyn_log_listener,
                                 mock_taskflow_load,
@@ -1318,23 +1516,36 @@ class TestControllerWorker(base.TestCase):
                                 mock_lb_repo_get,
                                 mock_health_mon_repo_get,
                                 mock_amp_repo_get):
-        amphora = mock.MagicMock()
-        amphora.load_balancer_id = None
-        mock_amp_repo_get.return_value = amphora
+        _flow_mock.run.reset_mock()
+        FAKE_FLOW = 'FAKE_FLOW'
+        mock_amphora = mock.MagicMock()
+        mock_amphora.load_balancer_id = None
+        mock_amphora.id = AMP_ID
+        mock_amphora.status = constants.AMPHORA_READY
+        mock_amp_repo_get.return_value = mock_amphora
+        mock_get_failover_amp_flow.return_value = FAKE_FLOW
+        expected_stored_params = {constants.AVAILABILITY_ZONE: {},
+                                  constants.BUILD_TYPE_PRIORITY:
+                                      constants.LB_CREATE_FAILOVER_PRIORITY,
+                                  constants.FLAVOR: {},
+                                  constants.LOADBALANCER: None,
+                                  constants.LOADBALANCER_ID: None,
+                                  constants.SERVER_GROUP_ID: None,
+                                  constants.VIP: None}
 
         cw = controller_worker.ControllerWorker()
         cw.failover_amphora(AMP_ID)
 
-        mock_lb_update.assert_not_called()
-        mock_perform_amp_failover.assert_called_once_with(
-            amphora, constants.LB_CREATE_FAILOVER_PRIORITY)
+        mock_get_failover_amp_flow.assert_called_once_with(mock_amphora, None)
+        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
+         assert_called_once_with(FAKE_FLOW, store=expected_stored_params))
+        _flow_mock.run.assert_called_once_with()
 
     @mock.patch(
         'octavia.db.repositories.AmphoraRepository.get_lb_for_amphora',
         return_value=None)
     @mock.patch('octavia.controller.worker.v1.flows.'
-                'amphora_flows.AmphoraFlows.get_failover_flow',
+                'amphora_flows.AmphoraFlows.get_failover_amphora_flow',
                 return_value=_flow_mock)
     def test_failover_spare_amphora(self,
                                     mock_get_failover_flow,
@@ -1358,20 +1569,22 @@ class TestControllerWorker(base.TestCase):
         mock_amphora.id = AMP_ID
         mock_amphora.status = constants.AMPHORA_READY
         mock_amphora.load_balancer_id = None
+        mock_amp_repo_get.return_value = mock_amphora
 
         cw = controller_worker.ControllerWorker()
-        cw._perform_amphora_failover(mock_amphora,
-                                     constants.LB_CREATE_FAILOVER_PRIORITY)
+        cw.failover_amphora(AMP_ID)
 
         (base_taskflow.BaseTaskFlowEngine._taskflow_load.
          assert_called_once_with(
              _flow_mock,
-             store={constants.FAILED_AMPHORA: mock_amphora,
+             store={constants.LOADBALANCER: None,
                     constants.LOADBALANCER_ID: None,
                     constants.BUILD_TYPE_PRIORITY:
                         constants.LB_CREATE_FAILOVER_PRIORITY,
                     constants.FLAVOR: {},
-                    constants.AVAILABILITY_ZONE: {}
+                    constants.SERVER_GROUP_ID: None,
+                    constants.AVAILABILITY_ZONE: {},
+                    constants.VIP: None
                     }))
 
         _flow_mock.run.assert_called_once_with()
@@ -1395,71 +1608,410 @@ class TestControllerWorker(base.TestCase):
         mock_amphora = mock.MagicMock()
         mock_amphora.id = AMP_ID
         mock_amphora.status = constants.DELETED
+        mock_amp_repo_get.return_value = mock_amphora
 
         cw = controller_worker.ControllerWorker()
-        cw._perform_amphora_failover(mock_amphora, 10)
+        cw.failover_amphora(AMP_ID)
 
         mock_delete.assert_called_with(_db_session, amphora_id=AMP_ID)
         mock_taskflow_load.assert_not_called()
 
-    @mock.patch('octavia.controller.worker.v1.'
-                'controller_worker.ControllerWorker._perform_amphora_failover')
-    @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
-    def test_failover_loadbalancer(self,
-                                   mock_update,
-                                   mock_perform,
-                                   mock_api_get_session,
-                                   mock_dyn_log_listener,
-                                   mock_taskflow_load,
-                                   mock_pool_repo_get,
-                                   mock_member_repo_get,
-                                   mock_l7rule_repo_get,
-                                   mock_l7policy_repo_get,
-                                   mock_listener_repo_get,
-                                   mock_lb_repo_get,
-                                   mock_health_mon_repo_get,
-                                   mock_amp_repo_get):
-        _amphora_mock2 = mock.MagicMock()
-        _amphora_mock3 = mock.MagicMock()
-        _amphora_mock3.status = constants.DELETED
-        _load_balancer_mock.amphorae = [
-            _amphora_mock, _amphora_mock2, _amphora_mock3]
-        cw = controller_worker.ControllerWorker()
-        cw.failover_loadbalancer('123')
-        mock_perform.assert_called_with(
-            _amphora_mock2, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)
-        mock_update.assert_called_with(_db_session, '123',
-                                       provisioning_status=constants.ACTIVE)
-
-        _load_balancer_mock.amphorae = [
-            _amphora_mock, _amphora_mock2, _amphora_mock3]
-        _amphora_mock2.role = constants.ROLE_BACKUP
-        cw.failover_loadbalancer('123')
-        # because mock2 gets failed over earlier now _amphora_mock
-        # is the last one
-        mock_perform.assert_called_with(
-            _amphora_mock, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)
-        mock_update.assert_called_with(_db_session, '123',
-                                       provisioning_status=constants.ACTIVE)
-
-        mock_perform.side_effect = OverflowError()
-        self.assertRaises(OverflowError, cw.failover_loadbalancer, 123)
-        mock_update.assert_called_with(_db_session, 123,
-                                       provisioning_status=constants.ERROR)
+    def test_get_amphorae_for_failover_single(self,
+                                              mock_api_get_session,
+                                              mock_dyn_log_listener,
+                                              mock_taskflow_load,
+                                              mock_pool_repo_get,
+                                              mock_member_repo_get,
+                                              mock_l7rule_repo_get,
+                                              mock_l7policy_repo_get,
+                                              mock_listener_repo_get,
+                                              mock_lb_repo_get,
+                                              mock_health_mon_repo_get,
+                                              mock_amp_repo_get):
+        amphora1_mock = mock.MagicMock()
+        amphora1_mock.status = constants.AMPHORA_ALLOCATED
+        amphora2_mock = mock.MagicMock()
+        amphora2_mock.status = constants.DELETED
+
+        load_balancer_mock = mock.MagicMock()
+        load_balancer_mock.topology = constants.TOPOLOGY_SINGLE
+        load_balancer_mock.amphorae = [amphora1_mock, amphora2_mock]
+
+        cw = controller_worker.ControllerWorker()
+        result = cw._get_amphorae_for_failover(load_balancer_mock)
+
+        self.assertEqual([amphora1_mock], result)
+
+    @mock.patch('octavia.common.utils.get_amphora_driver')
+    def test_get_amphorae_for_failover_act_stdby(self,
+                                                 mock_get_amp_driver,
+                                                 mock_api_get_session,
+                                                 mock_dyn_log_listener,
+                                                 mock_taskflow_load,
+                                                 mock_pool_repo_get,
+                                                 mock_member_repo_get,
+                                                 mock_l7rule_repo_get,
+                                                 mock_l7policy_repo_get,
+                                                 mock_listener_repo_get,
+                                                 mock_lb_repo_get,
+                                                 mock_health_mon_repo_get,
+                                                 mock_amp_repo_get):
+        # Note: This test uses three amphorae even though we only have
+        # two per load balancer to properly test the ordering from
+        # this method.
+        amp_driver_mock = mock.MagicMock()
+        amp_driver_mock.get_interface_from_ip.side_effect = [
+            'fake0', None, 'fake1']
+        mock_get_amp_driver.return_value = amp_driver_mock
+        backup_amphora_mock = mock.MagicMock()
+        backup_amphora_mock.status = constants.AMPHORA_ALLOCATED
+        deleted_amphora_mock = mock.MagicMock()
+        deleted_amphora_mock.status = constants.DELETED
+        master_amphora_mock = mock.MagicMock()
+        master_amphora_mock.status = constants.AMPHORA_ALLOCATED
+        bogus_amphora_mock = mock.MagicMock()
+        bogus_amphora_mock.status = constants.AMPHORA_ALLOCATED
+
+        load_balancer_mock = mock.MagicMock()
+        load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY
+        load_balancer_mock.amphorae = [
+            master_amphora_mock, deleted_amphora_mock, backup_amphora_mock,
+            bogus_amphora_mock]
+
+        cw = controller_worker.ControllerWorker()
+        result = cw._get_amphorae_for_failover(load_balancer_mock)
+
+        self.assertEqual([master_amphora_mock, bogus_amphora_mock,
+                          backup_amphora_mock], result)
+
+    @mock.patch('octavia.common.utils.get_amphora_driver')
+    def test_get_amphorae_for_failover_act_stdby_net_split(
+            self, mock_get_amp_driver, mock_api_get_session,
+            mock_dyn_log_listener, mock_taskflow_load, mock_pool_repo_get,
+            mock_member_repo_get, mock_l7rule_repo_get, mock_l7policy_repo_get,
+            mock_listener_repo_get, mock_lb_repo_get, mock_health_mon_repo_get,
+            mock_amp_repo_get):
+        # Case where the amps can't see each other and somehow end up with
+        # two amphorae with an interface. This is highly unlikely as the
+        # higher priority amphora should get the IP in a net split, but
+        # let's test the code for this odd case.
+        # Note: This test uses three amphorae even though we only have
+        # two per load balancer to properly test the ordering from
+        # this method.
+        amp_driver_mock = mock.MagicMock()
+        amp_driver_mock.get_interface_from_ip.side_effect = [
+            'fake0', 'fake1']
+        mock_get_amp_driver.return_value = amp_driver_mock
+        backup_amphora_mock = mock.MagicMock()
+        backup_amphora_mock.status = constants.AMPHORA_ALLOCATED
+        deleted_amphora_mock = mock.MagicMock()
+        deleted_amphora_mock.status = constants.DELETED
+        master_amphora_mock = mock.MagicMock()
+        master_amphora_mock.status = constants.AMPHORA_ALLOCATED
+
+        load_balancer_mock = mock.MagicMock()
+        load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY
+        load_balancer_mock.amphorae = [
+            backup_amphora_mock, deleted_amphora_mock, master_amphora_mock]
+
+        cw = controller_worker.ControllerWorker()
+        result = cw._get_amphorae_for_failover(load_balancer_mock)
+
+        self.assertEqual([backup_amphora_mock, master_amphora_mock], result)
+
+    def test_get_amphorae_for_failover_bogus_topology(self,
+                                                      mock_api_get_session,
+                                                      mock_dyn_log_listener,
+                                                      mock_taskflow_load,
+                                                      mock_pool_repo_get,
+                                                      mock_member_repo_get,
+                                                      mock_l7rule_repo_get,
+                                                      mock_l7policy_repo_get,
+                                                      mock_listener_repo_get,
+                                                      mock_lb_repo_get,
+                                                      mock_health_mon_repo_get,
+                                                      mock_amp_repo_get):
+        load_balancer_mock = mock.MagicMock()
+        load_balancer_mock.topology = 'bogus'
+
+        cw = controller_worker.ControllerWorker()
+        self.assertRaises(exceptions.InvalidTopology,
+                          cw._get_amphorae_for_failover,
+                          load_balancer_mock)
+
+    @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.'
+                'LoadBalancerFlows.get_failover_LB_flow')
+    @mock.patch('octavia.controller.worker.v1.controller_worker.'
+                'ControllerWorker._get_amphorae_for_failover')
+    def test_failover_loadbalancer_single(self,
+                                          mock_get_amps_for_failover,
+                                          mock_get_failover_lb_flow,
+                                          mock_api_get_session,
+                                          mock_dyn_log_listener,
+                                          mock_taskflow_load,
+                                          mock_pool_repo_get,
+                                          mock_member_repo_get,
+                                          mock_l7rule_repo_get,
+                                          mock_l7policy_repo_get,
+                                          mock_listener_repo_get,
+                                          mock_lb_repo_get,
+                                          mock_health_mon_repo_get,
+                                          mock_amp_repo_get):
+        FAKE_FLOW = 'FAKE_FLOW'
+        _flow_mock.reset_mock()
+        mock_lb_repo_get.return_value = _load_balancer_mock
+        mock_get_amps_for_failover.return_value = [_amphora_mock]
+        mock_get_failover_lb_flow.return_value = FAKE_FLOW
+
+        expected_flavor = {constants.LOADBALANCER_TOPOLOGY:
+                           _load_balancer_mock.topology}
+        expected_flow_store = {constants.LOADBALANCER: _load_balancer_mock,
+                               constants.BUILD_TYPE_PRIORITY:
+                                   constants.LB_CREATE_FAILOVER_PRIORITY,
+                               constants.LOADBALANCER_ID:
+                                   _load_balancer_mock.id,
+                               constants.SERVER_GROUP_ID:
+                                   _load_balancer_mock.server_group_id,
+                               constants.FLAVOR: expected_flavor,
+                               constants.AVAILABILITY_ZONE: {}}
+
+        cw = controller_worker.ControllerWorker()
+        cw.failover_loadbalancer(LB_ID)
+
+        mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID)
+        mock_get_amps_for_failover.assert_called_once_with(_load_balancer_mock)
+        mock_get_failover_lb_flow.assert_called_once_with([_amphora_mock],
+                                                          _load_balancer_mock)
+        mock_taskflow_load.assert_called_once_with(FAKE_FLOW,
+                                                   store=expected_flow_store)
+        _flow_mock.run.assert_called_once_with()
+
+    @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.'
+                'LoadBalancerFlows.get_failover_LB_flow')
+    @mock.patch('octavia.controller.worker.v1.controller_worker.'
+                'ControllerWorker._get_amphorae_for_failover')
+    def test_failover_loadbalancer_act_stdby(self,
+                                             mock_get_amps_for_failover,
+                                             mock_get_failover_lb_flow,
+                                             mock_api_get_session,
+                                             mock_dyn_log_listener,
+                                             mock_taskflow_load,
+                                             mock_pool_repo_get,
+                                             mock_member_repo_get,
+                                             mock_l7rule_repo_get,
+                                             mock_l7policy_repo_get,
+                                             mock_listener_repo_get,
+                                             mock_lb_repo_get,
+                                             mock_health_mon_repo_get,
+                                             mock_amp_repo_get):
+        FAKE_FLOW = 'FAKE_FLOW'
+        _flow_mock.reset_mock()
+        load_balancer_mock = mock.MagicMock()
+        load_balancer_mock.listeners = [_listener_mock]
+        load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY
+        load_balancer_mock.flavor_id = None
+        load_balancer_mock.availability_zone = None
+        load_balancer_mock.vip = _vip_mock
+        mock_lb_repo_get.return_value = load_balancer_mock
+        mock_get_amps_for_failover.return_value = [_amphora_mock,
+                                                   _amphora_mock]
+        mock_get_failover_lb_flow.return_value = FAKE_FLOW
+
+        expected_flavor = {constants.LOADBALANCER_TOPOLOGY:
+                           load_balancer_mock.topology}
+        expected_flow_store = {constants.LOADBALANCER: load_balancer_mock,
+                               constants.BUILD_TYPE_PRIORITY:
+                                   constants.LB_CREATE_FAILOVER_PRIORITY,
+                               constants.LOADBALANCER_ID:
+                                   load_balancer_mock.id,
+                               constants.SERVER_GROUP_ID:
+                                   load_balancer_mock.server_group_id,
+                               constants.FLAVOR: expected_flavor,
+                               constants.AVAILABILITY_ZONE: {}}
+
+        cw = controller_worker.ControllerWorker()
+        cw.failover_loadbalancer(LB_ID)
+
+        mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID)
+        mock_get_amps_for_failover.assert_called_once_with(load_balancer_mock)
+        mock_get_failover_lb_flow.assert_called_once_with(
+            [_amphora_mock, _amphora_mock], load_balancer_mock)
+        mock_taskflow_load.assert_called_once_with(FAKE_FLOW,
+                                                   store=expected_flow_store)
+        _flow_mock.run.assert_called_once_with()
+
+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
+    def test_failover_loadbalancer_no_lb(self,
+                                         mock_lb_repo_update,
+                                         mock_api_get_session,
+                                         mock_dyn_log_listener,
+                                         mock_taskflow_load,
+                                         mock_pool_repo_get,
+                                         mock_member_repo_get,
+                                         mock_l7rule_repo_get,
+                                         mock_l7policy_repo_get,
+                                         mock_listener_repo_get,
+                                         mock_lb_repo_get,
+                                         mock_health_mon_repo_get,
+                                         mock_amp_repo_get):
+        mock_lb_repo_get.return_value = None
+
+        cw = controller_worker.ControllerWorker()
+        cw.failover_loadbalancer(LB_ID)
+
+        mock_lb_repo_update.assert_called_once_with(
+            _db_session, LB_ID, provisioning_status=constants.ERROR)
+
+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
+    @mock.patch('octavia.controller.worker.v1.controller_worker.'
+                'ControllerWorker._get_amphorae_for_failover')
+    def test_failover_loadbalancer_with_bogus_topology(
+            self, mock_get_amps_for_failover, mock_lb_repo_update,
+            mock_api_get_session, mock_dyn_log_listener, mock_taskflow_load,
+            mock_pool_repo_get, mock_member_repo_get, mock_l7rule_repo_get,
+            mock_l7policy_repo_get, mock_listener_repo_get, mock_lb_repo_get,
+            mock_health_mon_repo_get, mock_amp_repo_get):
+        _flow_mock.reset_mock()
+        load_balancer_mock = mock.MagicMock()
+        load_balancer_mock.topology = 'bogus'
+        mock_lb_repo_get.return_value = load_balancer_mock
+        mock_get_amps_for_failover.return_value = [_amphora_mock]
+
+        cw = controller_worker.ControllerWorker()
+        result = cw.failover_loadbalancer(LB_ID)
+
+        self.assertIsNone(result)
+        mock_lb_repo_update.assert_called_once_with(
+            _db_session, LB_ID, provisioning_status=constants.ERROR)
+        mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID)
+        mock_get_amps_for_failover.assert_called_once_with(load_balancer_mock)
+
+    @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
+                'get_availability_zone_metadata_dict', return_value={})
+    @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.'
+                'LoadBalancerFlows.get_failover_LB_flow')
+    @mock.patch('octavia.controller.worker.v1.controller_worker.'
+                'ControllerWorker._get_amphorae_for_failover')
+    def test_failover_loadbalancer_with_az(self,
+                                           mock_get_amps_for_failover,
+                                           mock_get_failover_lb_flow,
+                                           mock_get_az_meta,
+                                           mock_api_get_session,
+                                           mock_dyn_log_listener,
+                                           mock_taskflow_load,
+                                           mock_pool_repo_get,
+                                           mock_member_repo_get,
+                                           mock_l7rule_repo_get,
+                                           mock_l7policy_repo_get,
+                                           mock_listener_repo_get,
+                                           mock_lb_repo_get,
+                                           mock_health_mon_repo_get,
+                                           mock_amp_repo_get):
+        FAKE_FLOW = 'FAKE_FLOW'
+        _flow_mock.reset_mock()
+        load_balancer_mock = mock.MagicMock()
+        load_balancer_mock.listeners = [_listener_mock]
+        load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY
+        load_balancer_mock.flavor_id = None
+        load_balancer_mock.availability_zone = uuidutils.generate_uuid()
+        load_balancer_mock.vip = _vip_mock
+        mock_lb_repo_get.return_value = load_balancer_mock
+        mock_get_amps_for_failover.return_value = [_amphora_mock]
+        mock_get_failover_lb_flow.return_value = FAKE_FLOW
+        mock_get_az_meta.return_value = {'planet': 'jupiter'}
+
+        expected_flavor = {constants.LOADBALANCER_TOPOLOGY:
+                           load_balancer_mock.topology}
+        expected_flow_store = {constants.LOADBALANCER: load_balancer_mock,
+                               constants.BUILD_TYPE_PRIORITY:
+                                   constants.LB_CREATE_FAILOVER_PRIORITY,
+                               constants.LOADBALANCER_ID:
+                                   load_balancer_mock.id,
+                               constants.FLAVOR: expected_flavor,
+                               constants.SERVER_GROUP_ID:
+                                   load_balancer_mock.server_group_id,
+                               constants.AVAILABILITY_ZONE: {
+                                   'planet': 'jupiter'}}
+
+        cw = controller_worker.ControllerWorker()
+        cw.failover_loadbalancer(LB_ID)
+
+        mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID)
+        mock_get_amps_for_failover.assert_called_once_with(load_balancer_mock)
+        mock_get_failover_lb_flow.assert_called_once_with([_amphora_mock],
+                                                          load_balancer_mock)
+        mock_taskflow_load.assert_called_once_with(FAKE_FLOW,
+                                                   store=expected_flow_store)
+        _flow_mock.run.assert_called_once_with()
+
+    @mock.patch('octavia.db.repositories.FlavorRepository.'
+                'get_flavor_metadata_dict', return_value={'taste': 'spicy'})
+    @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.'
+                'LoadBalancerFlows.get_failover_LB_flow')
+    @mock.patch('octavia.controller.worker.v1.controller_worker.'
+                'ControllerWorker._get_amphorae_for_failover')
+    def test_failover_loadbalancer_with_flavor(self,
+                                               mock_get_amps_for_failover,
+                                               mock_get_failover_lb_flow,
+                                               mock_get_flavor_meta,
+                                               mock_api_get_session,
+                                               mock_dyn_log_listener,
+                                               mock_taskflow_load,
+                                               mock_pool_repo_get,
+                                               mock_member_repo_get,
+                                               mock_l7rule_repo_get,
+                                               mock_l7policy_repo_get,
+                                               mock_listener_repo_get,
+                                               mock_lb_repo_get,
+                                               mock_health_mon_repo_get,
+                                               mock_amp_repo_get):
+        FAKE_FLOW = 'FAKE_FLOW'
+        _flow_mock.reset_mock()
+        load_balancer_mock = mock.MagicMock()
+        load_balancer_mock.listeners = [_listener_mock]
+        load_balancer_mock.topology = constants.TOPOLOGY_SINGLE
+        load_balancer_mock.flavor_id = uuidutils.generate_uuid()
+        load_balancer_mock.availability_zone = None
+        load_balancer_mock.vip = _vip_mock
+        mock_lb_repo_get.return_value = load_balancer_mock
+        mock_get_amps_for_failover.return_value = [_amphora_mock,
+                                                   _amphora_mock]
+        mock_get_failover_lb_flow.return_value = FAKE_FLOW
+
+        expected_flavor = {'taste': 'spicy', constants.LOADBALANCER_TOPOLOGY:
+                           load_balancer_mock.topology}
+        expected_flow_store = {constants.LOADBALANCER: load_balancer_mock,
+                               constants.BUILD_TYPE_PRIORITY:
+                                   constants.LB_CREATE_FAILOVER_PRIORITY,
+                               constants.LOADBALANCER_ID:
+                                   load_balancer_mock.id,
+                               constants.FLAVOR: expected_flavor,
+                               constants.SERVER_GROUP_ID:
+                                   load_balancer_mock.server_group_id,
+                               constants.AVAILABILITY_ZONE: {}}
+
+        cw = controller_worker.ControllerWorker()
+        cw.failover_loadbalancer(LB_ID)
+
+        mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID)
+        mock_get_amps_for_failover.assert_called_once_with(load_balancer_mock)
+        mock_get_failover_lb_flow.assert_called_once_with(
+            [_amphora_mock, _amphora_mock], load_balancer_mock)
+        mock_taskflow_load.assert_called_once_with(FAKE_FLOW,
+                                                   store=expected_flow_store)
+        _flow_mock.run.assert_called_once_with()
+
     @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
                 'get_availability_zone_metadata_dict', return_value={})
     @mock.patch('octavia.db.repositories.FlavorRepository.'
                 'get_flavor_metadata_dict', return_value={})
     @mock.patch('octavia.controller.worker.v1.flows.'
-                'amphora_flows.AmphoraFlows.get_failover_flow',
+                'amphora_flows.AmphoraFlows.get_failover_amphora_flow',
                 return_value=_flow_mock)
     @mock.patch(
         'octavia.db.repositories.AmphoraRepository.get_lb_for_amphora',
         return_value=_load_balancer_mock)
-    @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
     def test_failover_amphora_anti_affinity(self,
-                                            mock_update,
                                             mock_get_lb_for_amphora,
                                             mock_get_update_listener_flow,
                                             mock_get_flavor_meta,
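Taken together, the _get_amphorae_for_failover tests above pin down an ordering contract: DELETED amphorae are skipped, SINGLE topologies need no probing, and in active/standby the amphora that cannot be reached or does not report a VIP interface sorts last. A condensed sketch that satisfies those assertions; it is not the production implementation, and the InvalidTopology kwarg is an assumption:

    from octavia.common import constants
    from octavia.common import exceptions
    from octavia.common import utils


    def order_amphorae_for_failover(load_balancer, timeout_dict=None):
        # Amphorae already marked DELETED never take part in a failover.
        amps = [amp for amp in load_balancer.amphorae
                if amp.status != constants.DELETED]
        if load_balancer.topology == constants.TOPOLOGY_SINGLE:
            return amps
        if load_balancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
            amp_driver = utils.get_amphora_driver()
            with_vip, without_vip = [], []
            for amp in amps:
                try:
                    interface = amp_driver.get_interface_from_ip(
                        amp, load_balancer.vip.ip_address,
                        timeout_dict=timeout_dict)
                except Exception:
                    interface = None
                # Amphorae that answer and report a VIP interface come
                # first; an unreachable or VIP-less amphora sorts last,
                # matching the assertions above.
                (with_vip if interface else without_vip).append(amp)
            return with_vip + without_vip
        raise exceptions.InvalidTopology(topology=load_balancer.topology)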
@@ -1486,19 +2038,19 @@ class TestControllerWorker(base.TestCase):
         (base_taskflow.BaseTaskFlowEngine._taskflow_load.
          assert_called_once_with(
              _flow_mock,
-             store={constants.FAILED_AMPHORA: _amphora_mock,
-                    constants.LOADBALANCER_ID:
-                        _amphora_mock.load_balancer_id,
+             store={constants.LOADBALANCER_ID: _load_balancer_mock.id,
                     constants.BUILD_TYPE_PRIORITY:
                         constants.LB_CREATE_FAILOVER_PRIORITY,
-                    constants.SERVER_GROUP_ID: "123",
-                    constants.FLAVOR: {},
-                    constants.AVAILABILITY_ZONE: {}
+                    constants.FLAVOR: {'loadbalancer_topology':
+                                       _load_balancer_mock.topology},
+                    constants.AVAILABILITY_ZONE: {},
+                    constants.LOADBALANCER: _load_balancer_mock,
+                    constants.VIP: _load_balancer_mock.vip,
+                    constants.SERVER_GROUP_ID:
+                        _load_balancer_mock.server_group_id
                     }))
 
         _flow_mock.run.assert_called_once_with()
-        mock_update.assert_called_with(_db_session, LB_ID,
-                                       provisioning_status=constants.ACTIVE)
 
     @mock.patch('octavia.controller.worker.v1.flows.'
                 'amphora_flows.AmphoraFlows.cert_rotate_amphora_flow',
@@ -535,8 +535,9 @@ class TestAmphoraDriverTasks(base.TestCase):
         amphora_update_vrrp_interface_obj = (
             amphora_driver_tasks.AmphoraUpdateVRRPInterface())
         amphora_update_vrrp_interface_obj.execute(_LB_mock)
-        mock_driver.get_vrrp_interface.assert_called_once_with(
-            _db_amphora_mock, timeout_dict=timeout_dict)
+        mock_driver.get_interface_from_ip.assert_called_once_with(
+            _db_amphora_mock, _db_amphora_mock.vrrp_ip,
+            timeout_dict=timeout_dict)
 
         # Test revert
         mock_driver.reset_mock()
@@ -341,15 +341,65 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         for amp in amps:
             mock_plug_aap.assert_any_call(lb, lb.vip, amp, subnet)
 
-    def test_update_vip_sg(self):
-        lb = dmh.generate_load_balancer_tree()
-        list_security_groups = self.driver.neutron_client.list_security_groups
-        list_security_groups.return_value = {
-            'security_groups': [
-                {'id': 'lb-sec-grp1'}
-            ]
-        }
-        self.driver.update_vip_sg(lb, lb.vip)
+    @mock.patch('octavia.common.utils.get_vip_security_group_name')
+    def test_update_vip_sg(self, mock_get_sg_name):
+        LB_ID = uuidutils.generate_uuid()
+        SG_ID = uuidutils.generate_uuid()
+        VIP_PORT_ID = uuidutils.generate_uuid()
+        TEST_SG_NAME = 'test_SG_name'
+        lb_mock = mock.MagicMock()
+        lb_mock.id = LB_ID
+        vip_mock = mock.MagicMock()
+        vip_mock.port_id = VIP_PORT_ID
+        security_group_dict = {'id': SG_ID}
+        mock_get_sg_name.return_value = TEST_SG_NAME
+
+        test_driver = allowed_address_pairs.AllowedAddressPairsDriver()
+
+        test_driver._add_vip_security_group_to_port = mock.MagicMock()
+        test_driver._create_security_group = mock.MagicMock()
+        test_driver._get_lb_security_group = mock.MagicMock()
+        test_driver._update_security_group_rules = mock.MagicMock()
+        test_driver._get_lb_security_group.side_effect = [security_group_dict,
+                                                          None]
+        test_driver._create_security_group.return_value = security_group_dict
+
+        # Test security groups disabled
+        test_driver.sec_grp_enabled = False
+
+        result = test_driver.update_vip_sg(lb_mock, vip_mock)
+
+        self.assertIsNone(result)
+        test_driver._add_vip_security_group_to_port.assert_not_called()
+        test_driver._get_lb_security_group.assert_not_called()
+        test_driver._update_security_group_rules.assert_not_called()
+
+        # Test by security group ID
+        test_driver.sec_grp_enabled = True
+
+        result = test_driver.update_vip_sg(lb_mock, vip_mock)
+
+        self.assertEqual(SG_ID, result)
+        test_driver._update_security_group_rules.assert_called_once_with(
+            lb_mock, SG_ID)
+        test_driver._add_vip_security_group_to_port.assert_called_once_with(
+            LB_ID, VIP_PORT_ID, SG_ID)
+
+        # Test by security group name
+        test_driver._add_vip_security_group_to_port.reset_mock()
+        test_driver._get_lb_security_group.reset_mock()
+        test_driver._update_security_group_rules.reset_mock()
+
+        result = test_driver.update_vip_sg(lb_mock, vip_mock)
+
+        self.assertEqual(SG_ID, result)
+        mock_get_sg_name.assert_called_once_with(LB_ID)
+        test_driver._create_security_group.assert_called_once_with(
+            TEST_SG_NAME)
+        test_driver._update_security_group_rules.assert_called_once_with(
+            lb_mock, SG_ID)
+        test_driver._add_vip_security_group_to_port.assert_called_once_with(
+            LB_ID, VIP_PORT_ID, SG_ID)
+
     def test_plug_aap_port(self):
         lb = dmh.generate_load_balancer_tree()
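The three scenarios in the rewritten test_update_vip_sg imply a simple decision ladder. A condensed sketch of that behavior, not the driver's actual body; the argument passed to _get_lb_security_group is assumed:

    from octavia.common import utils


    def update_vip_sg_sketch(driver, load_balancer, vip):
        # Nothing to manage when security groups are disabled.
        if not driver.sec_grp_enabled:
            return None
        sec_grp = driver._get_lb_security_group(load_balancer.id)
        if not sec_grp:
            # Repair path: the group is missing, recreate it under its
            # conventional name.
            sg_name = utils.get_vip_security_group_name(load_balancer.id)
            sec_grp = driver._create_security_group(sg_name)
        sg_id = sec_grp.get('id')
        driver._update_security_group_rules(load_balancer, sg_id)
        driver._add_vip_security_group_to_port(load_balancer.id,
                                               vip.port_id, sg_id)
        return sg_id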
@@ -452,12 +502,38 @@ class TestAllowedAddressPairsDriver(base.TestCase):
                           t_constants.MOCK_VRRP_IP2])
         self.assertEqual(lb.vip.ip_address, amp.ha_ip)
 
+    def test_validate_fixed_ip(self):
+        IP_ADDRESS = '203.0.113.61'
+        OTHER_IP_ADDRESS = '203.0.113.62'
+        SUBNET_ID = uuidutils.generate_uuid()
+        OTHER_SUBNET_ID = uuidutils.generate_uuid()
+        fixed_ip_mock = mock.MagicMock()
+        fixed_ip_mock.subnet_id = SUBNET_ID
+        fixed_ip_mock.ip_address = IP_ADDRESS
+
+        # valid
+        result = self.driver._validate_fixed_ip([fixed_ip_mock], SUBNET_ID,
+                                                IP_ADDRESS)
+        self.assertTrue(result)
+
+        # no subnet match
+        result = self.driver._validate_fixed_ip(
+            [fixed_ip_mock], OTHER_SUBNET_ID, IP_ADDRESS)
+        self.assertFalse(result)
+
+        # no IP match
+        result = self.driver._validate_fixed_ip([fixed_ip_mock], SUBNET_ID,
+                                                OTHER_IP_ADDRESS)
+        self.assertFalse(result)
+
     def test_allocate_vip_when_port_already_provided(self):
         show_port = self.driver.neutron_client.show_port
         show_port.return_value = t_constants.MOCK_NEUTRON_PORT
         fake_lb_vip = data_models.Vip(
             port_id=t_constants.MOCK_PORT_ID,
-            subnet_id=t_constants.MOCK_SUBNET_ID)
+            subnet_id=t_constants.MOCK_SUBNET_ID,
+            network_id=t_constants.MOCK_NETWORK_ID,
+            ip_address=t_constants.MOCK_IP_ADDRESS)
         fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip)
         vip = self.driver.allocate_vip(fake_lb)
         self.assertIsInstance(vip, data_models.Vip)
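For readers skimming the diff, a standalone reimplementation of the matching rule that test_validate_fixed_ip pins down (the real driver method may differ in details):

    import ipaddress


    def validate_fixed_ip(fixed_ips, subnet_id, ip_address):
        """True only when a fixed IP matches both the subnet and address."""
        wanted = ipaddress.ip_address(ip_address)
        for fixed_ip in fixed_ips:
            if (fixed_ip.subnet_id == subnet_id
                    and ipaddress.ip_address(fixed_ip.ip_address) == wanted):
                return True
        return False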
@@ -466,6 +542,108 @@ class TestAllowedAddressPairsDriver(base.TestCase):
        self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id)
        self.assertEqual(fake_lb.id, vip.load_balancer_id)
+
+    @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.'
+                '_check_extension_enabled', return_value=True)
+    def test_allocate_vip_with_port_mismatch(self, mock_check_ext):
+        bad_existing_port = mock.MagicMock()
+        bad_existing_port.port_id = uuidutils.generate_uuid()
+        bad_existing_port.network_id = uuidutils.generate_uuid()
+        bad_existing_port.subnet_id = uuidutils.generate_uuid()
+        show_port = self.driver.neutron_client.show_port
+        show_port.return_value = bad_existing_port
+        port_create_dict = copy.deepcopy(t_constants.MOCK_NEUTRON_PORT)
+        port_create_dict['port']['device_owner'] = (
+            allowed_address_pairs.OCTAVIA_OWNER)
+        port_create_dict['port']['device_id'] = 'lb-1'
+        create_port = self.driver.neutron_client.create_port
+        create_port.return_value = port_create_dict
+        show_subnet = self.driver.neutron_client.show_subnet
+        show_subnet.return_value = {'subnet': {
+            'id': t_constants.MOCK_SUBNET_ID,
+            'network_id': t_constants.MOCK_NETWORK_ID
+        }}
+        fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID,
+                                      network_id=t_constants.MOCK_NETWORK_ID,
+                                      port_id=t_constants.MOCK_PORT_ID,
+                                      octavia_owned=True)
+        fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip,
+                                           project_id='test-project')
+        vip = self.driver.allocate_vip(fake_lb)
+        exp_create_port_call = {
+            'port': {
+                'name': 'octavia-lb-1',
+                'network_id': t_constants.MOCK_NETWORK_ID,
+                'device_id': 'lb-1',
+                'device_owner': allowed_address_pairs.OCTAVIA_OWNER,
+                'admin_state_up': False,
+                'project_id': 'test-project',
+                'fixed_ips': [{'subnet_id': t_constants.MOCK_SUBNET_ID}]
+            }
+        }
+        self.driver.neutron_client.delete_port.assert_called_once_with(
+            t_constants.MOCK_PORT_ID)
+        create_port.assert_called_once_with(exp_create_port_call)
+        self.assertIsInstance(vip, data_models.Vip)
+        self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address)
+        self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id)
+        self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id)
+        self.assertEqual(fake_lb.id, vip.load_balancer_id)
+
+    @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.'
+                'get_port', side_effect=network_base.PortNotFound)
+    @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.'
+                '_check_extension_enabled', return_value=True)
+    def test_allocate_vip_when_port_not_found(self, mock_check_ext,
+                                              mock_get_port):
+        port_create_dict = copy.deepcopy(t_constants.MOCK_NEUTRON_PORT)
+        port_create_dict['port']['device_owner'] = (
+            allowed_address_pairs.OCTAVIA_OWNER)
+        port_create_dict['port']['device_id'] = 'lb-1'
+        create_port = self.driver.neutron_client.create_port
+        create_port.return_value = port_create_dict
+        show_subnet = self.driver.neutron_client.show_subnet
+        show_subnet.return_value = {'subnet': {
+            'id': t_constants.MOCK_SUBNET_ID,
+            'network_id': t_constants.MOCK_NETWORK_ID
+        }}
+        fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID,
+                                      network_id=t_constants.MOCK_NETWORK_ID,
+                                      port_id=t_constants.MOCK_PORT_ID)
+        fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip,
+                                           project_id='test-project')
+        vip = self.driver.allocate_vip(fake_lb)
+        exp_create_port_call = {
+            'port': {
+                'name': 'octavia-lb-1',
+                'network_id': t_constants.MOCK_NETWORK_ID,
+                'device_id': 'lb-1',
+                'device_owner': allowed_address_pairs.OCTAVIA_OWNER,
+                'admin_state_up': False,
+                'project_id': 'test-project',
+                'fixed_ips': [{'subnet_id': t_constants.MOCK_SUBNET_ID}]
+            }
+        }
+        create_port.assert_called_once_with(exp_create_port_call)
+        self.assertIsInstance(vip, data_models.Vip)
+        self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address)
+        self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id)
+        self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id)
+        self.assertEqual(fake_lb.id, vip.load_balancer_id)
+
+    @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.'
+                'get_port', side_effect=Exception('boom'))
+    @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.'
+                '_check_extension_enabled', return_value=True)
+    def test_allocate_vip_unkown_exception(self, mock_check_ext,
+                                           mock_get_port):
+        fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID,
+                                      network_id=t_constants.MOCK_NETWORK_ID,
+                                      port_id=t_constants.MOCK_PORT_ID)
+        fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip,
+                                           project_id='test-project')
+        self.assertRaises(network_base.AllocateVIPException,
+                          self.driver.allocate_vip, fake_lb)
+
    def test_allocate_vip_when_port_creation_fails(self):
        fake_lb_vip = data_models.Vip(
            subnet_id=t_constants.MOCK_SUBNET_ID)
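Taken together, the three new tests above pin down the VIP repair behavior this patch adds: a mismatched VIP port that Octavia owns is deleted and recreated, a port that is gone entirely is simply recreated, and any other lookup failure surfaces as AllocateVIPException. A minimal sketch of that decision logic follows; it is a hypothetical reconstruction inferred from the assertions (the function name and parameters are illustrative, not the driver's actual API):

# Hypothetical reconstruction of the repair logic the tests above
# exercise; the real code lives in the allowed_address_pairs driver.
def repair_vip_port(existing_port, vip_network_id, octavia_owned,
                    delete_port, create_port, port_request):
    """Return a usable VIP port, replacing a corrupted or missing one.

    existing_port: the Neutron port dict for the VIP, or None if the
        lookup raised PortNotFound.
    delete_port / create_port: callables wrapping the Neutron client.
    port_request: the {'port': {...}} body for a replacement port.
    """
    if existing_port is not None:
        if existing_port['network_id'] == vip_network_id:
            return existing_port  # port is intact, keep using it
        if octavia_owned:
            delete_port(existing_port['id'])  # drop the corrupted port
    # The port was missing or corrupted: create a fresh replacement.
    return create_port(port_request)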
@@ -512,6 +690,77 @@ class TestAllowedAddressPairsDriver(base.TestCase):
        self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id)
        self.assertEqual(fake_lb.id, vip.load_balancer_id)
+
+    @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.'
+                '_check_extension_enabled', return_value=True)
+    def test_allocate_vip_when_no_port_fixed_ip(self, mock_check_ext):
+        port_create_dict = copy.deepcopy(t_constants.MOCK_NEUTRON_PORT)
+        port_create_dict['port']['device_owner'] = (
+            allowed_address_pairs.OCTAVIA_OWNER)
+        port_create_dict['port']['device_id'] = 'lb-1'
+        create_port = self.driver.neutron_client.create_port
+        create_port.return_value = port_create_dict
+        show_subnet = self.driver.neutron_client.show_subnet
+        show_subnet.return_value = {'subnet': {
+            'id': t_constants.MOCK_SUBNET_ID,
+            'network_id': t_constants.MOCK_NETWORK_ID
+        }}
+        fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID,
+                                      network_id=t_constants.MOCK_NETWORK_ID,
+                                      ip_address=t_constants.MOCK_IP_ADDRESS)
+        fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip,
+                                           project_id='test-project')
+        vip = self.driver.allocate_vip(fake_lb)
+        exp_create_port_call = {
+            'port': {
+                'name': 'octavia-lb-1',
+                'network_id': t_constants.MOCK_NETWORK_ID,
+                'device_id': 'lb-1',
+                'device_owner': allowed_address_pairs.OCTAVIA_OWNER,
+                'admin_state_up': False,
+                'project_id': 'test-project',
+                'fixed_ips': [{'subnet_id': t_constants.MOCK_SUBNET_ID,
+                               'ip_address': t_constants.MOCK_IP_ADDRESS}]
+            }
+        }
+        create_port.assert_called_once_with(exp_create_port_call)
+        self.assertIsInstance(vip, data_models.Vip)
+        self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address)
+        self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id)
+        self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id)
+        self.assertEqual(fake_lb.id, vip.load_balancer_id)
+
+    @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.'
+                '_check_extension_enabled', return_value=True)
+    def test_allocate_vip_when_no_port_no_fixed_ip(self, mock_check_ext):
+        port_create_dict = copy.deepcopy(t_constants.MOCK_NEUTRON_PORT)
+        port_create_dict['port']['device_owner'] = (
+            allowed_address_pairs.OCTAVIA_OWNER)
+        port_create_dict['port']['device_id'] = 'lb-1'
+        create_port = self.driver.neutron_client.create_port
+        create_port.return_value = port_create_dict
+        show_subnet = self.driver.neutron_client.show_subnet
+        show_subnet.return_value = {'subnet': {
+            'id': t_constants.MOCK_SUBNET_ID,
+            'network_id': t_constants.MOCK_NETWORK_ID
+        }}
+        fake_lb_vip = data_models.Vip(network_id=t_constants.MOCK_NETWORK_ID)
+        fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip,
+                                           project_id='test-project')
+        vip = self.driver.allocate_vip(fake_lb)
+        exp_create_port_call = {
+            'port': {
+                'name': 'octavia-lb-1',
+                'network_id': t_constants.MOCK_NETWORK_ID,
+                'device_id': 'lb-1',
+                'device_owner': allowed_address_pairs.OCTAVIA_OWNER,
+                'admin_state_up': False,
+                'project_id': 'test-project'}
+        }
+        create_port.assert_called_once_with(exp_create_port_call)
+        self.assertIsInstance(vip, data_models.Vip)
+        self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id)
+        self.assertEqual(fake_lb.id, vip.load_balancer_id)
+
    @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.'
                '_check_extension_enabled', return_value=False)
    def test_allocate_vip_when_no_port_provided_tenant(self, mock_check_ext):
@@ -626,8 +875,8 @@ class TestAllowedAddressPairsDriver(base.TestCase):
    def test_plug_network_when_compute_instance_cant_be_found(self):
        net_id = t_constants.MOCK_NOVA_INTERFACE.net_id
        network_attach = self.driver.compute.attach_network_or_port
-        network_attach.side_effect = nova_exceptions.NotFound(
-            404, message='Instance not found')
+        network_attach.side_effect = exceptions.NotFound(
+            resource='Instance not found', id=1)
        self.assertRaises(network_base.AmphoraNotFound,
                          self.driver.plug_network,
                          t_constants.MOCK_COMPUTE_ID, net_id)
@@ -962,20 +1211,20 @@ class TestAllowedAddressPairsDriver(base.TestCase):
            port_id=self.PORT_ID)

        # NotFound cases
-        network_attach.side_effect = nova_exceptions.NotFound(
-            1, message='Instance')
+        network_attach.side_effect = exceptions.NotFound(
+            resource='Instance', id=1)
        self.assertRaises(network_base.AmphoraNotFound,
                          self.driver.plug_port,
                          amphora,
                          port)
-        network_attach.side_effect = nova_exceptions.NotFound(
-            1, message='Network')
+        network_attach.side_effect = exceptions.NotFound(
+            resource='Network', id=1)
        self.assertRaises(network_base.NetworkNotFound,
                          self.driver.plug_port,
                          amphora,
                          port)
-        network_attach.side_effect = nova_exceptions.NotFound(
-            1, message='bogus')
+        network_attach.side_effect = exceptions.NotFound(
+            resource='bogus', id=1)
        self.assertRaises(network_base.PlugNetworkException,
                          self.driver.plug_port,
                          amphora,
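The updated assertions above also document the new exception contract: the driver now receives Octavia's common exceptions.NotFound (with resource and id fields) instead of nova_exceptions.NotFound (with a code and message), and dispatches on the resource name. A self-contained sketch of that mapping, inferred from the three assertRaises cases (the helper and stand-in classes are illustrative):

# Stand-ins for the octavia.network.base exception classes so the
# sketch runs on its own; the mapping mirrors the assertions above.
class AmphoraNotFound(Exception):
    pass


class NetworkNotFound(Exception):
    pass


class PlugNetworkException(Exception):
    pass


def translate_not_found(resource):
    """Map a NotFound 'resource' value to the driver-level exception."""
    if 'Instance' in resource:
        raise AmphoraNotFound(resource)  # the amphora VM is gone
    if 'Network' in resource:
        raise NetworkNotFound(resource)  # the target network is gone
    raise PlugNetworkException(resource)  # anything else: generic failure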
@@ -1100,3 +1349,157 @@ class TestAllowedAddressPairsDriver(base.TestCase):
        self.assertRaises(network_base.TimeoutException,
                          self.driver.wait_for_port_detach,
                          amphora)
+
+    def test_delete_port(self):
+        PORT_ID = uuidutils.generate_uuid()
+
+        self.driver.neutron_client.delete_port.side_effect = [
+            mock.DEFAULT, neutron_exceptions.NotFound, Exception('boom')]
+
+        # Test successful delete
+        self.driver.delete_port(PORT_ID)
+
+        self.driver.neutron_client.delete_port.assert_called_once_with(PORT_ID)
+
+        # Test port NotFound (does not raise)
+        self.driver.delete_port(PORT_ID)
+
+        # Test unknown exception
+        self.assertRaises(exceptions.NetworkServiceError,
+                          self.driver.delete_port, PORT_ID)
+
+    def test_set_port_admin_state_up(self):
+        PORT_ID = uuidutils.generate_uuid()
+        TEST_STATE = 'test state'
+
+        self.driver.neutron_client.update_port.side_effect = [
+            mock.DEFAULT, neutron_exceptions.NotFound, Exception('boom')]
+
+        # Test successful state set
+        self.driver.set_port_admin_state_up(PORT_ID, TEST_STATE)
+
+        self.driver.neutron_client.update_port.assert_called_once_with(
+            PORT_ID, {'port': {'admin_state_up': TEST_STATE}})
+
+        # Test port NotFound
+        self.assertRaises(network_base.PortNotFound,
+                          self.driver.set_port_admin_state_up,
+                          PORT_ID, {'port': {'admin_state_up': TEST_STATE}})
+
+        # Test unknown exception
+        self.assertRaises(exceptions.NetworkServiceError,
+                          self.driver.set_port_admin_state_up, PORT_ID,
+                          {'port': {'admin_state_up': TEST_STATE}})
+
+    def test_create_port(self):
+        ADMIN_STATE_UP = False
+        FAKE_NAME = 'fake_name'
+        IP_ADDRESS1 = '203.0.113.71'
+        IP_ADDRESS2 = '203.0.113.72'
+        IP_ADDRESS3 = '203.0.113.73'
+        NETWORK_ID = uuidutils.generate_uuid()
+        QOS_POLICY_ID = uuidutils.generate_uuid()
+        SECONDARY_IPS = [IP_ADDRESS2, IP_ADDRESS3]
+        SECURITY_GROUP_ID = uuidutils.generate_uuid()
+        SUBNET1_ID = uuidutils.generate_uuid()
+        FIXED_IPS = [{'subnet_id': SUBNET1_ID, 'ip_address': IP_ADDRESS1}]
+
+        MOCK_NEUTRON_PORT = {'port': {
+            'network_id': NETWORK_ID, 'device_id': t_constants.MOCK_DEVICE_ID,
+            'device_owner': t_constants.MOCK_DEVICE_OWNER,
+            'id': t_constants.MOCK_PORT_ID, 'name': FAKE_NAME,
+            'tenant_id': t_constants.MOCK_PROJECT_ID,
+            'admin_state_up': ADMIN_STATE_UP,
+            'status': t_constants.MOCK_STATUS,
+            'mac_address': t_constants.MOCK_MAC_ADDR,
+            'fixed_ips': [{'ip_address': IP_ADDRESS1,
+                           'subnet_id': SUBNET1_ID}],
+            'security_groups': [],
+            'qos_policy_id': QOS_POLICY_ID}}
+
+        reference_port_dict = {'admin_state_up': ADMIN_STATE_UP,
+                               'device_id': t_constants.MOCK_DEVICE_ID,
+                               'device_owner': t_constants.MOCK_DEVICE_OWNER,
+                               'fixed_ips': [],
+                               'id': t_constants.MOCK_PORT_ID,
+                               'mac_address': t_constants.MOCK_MAC_ADDR,
+                               'name': FAKE_NAME,
+                               'network': None,
+                               'network_id': NETWORK_ID,
+                               'project_id': t_constants.MOCK_PROJECT_ID,
+                               'qos_policy_id': QOS_POLICY_ID,
+                               'security_group_ids': [],
+                               'status': t_constants.MOCK_STATUS}
+
+        self.driver.neutron_client.create_port.side_effect = [
+            MOCK_NEUTRON_PORT, MOCK_NEUTRON_PORT, Exception('boom')]
+
+        # Test successful path
+        result = self.driver.create_port(
+            NETWORK_ID, name=FAKE_NAME, fixed_ips=FIXED_IPS,
+            secondary_ips=SECONDARY_IPS,
+            security_group_ids=[SECURITY_GROUP_ID], admin_state_up=False,
+            qos_policy_id=QOS_POLICY_ID)
+
+        self.assertEqual(reference_port_dict, result.to_dict())
+        self.driver.neutron_client.create_port.assert_called_once_with(
+            {'port': {
+                'network_id': NETWORK_ID, 'admin_state_up': ADMIN_STATE_UP,
+                'device_owner': allowed_address_pairs.OCTAVIA_OWNER,
+                'allowed_address_pairs': [
+                    {'ip_address': IP_ADDRESS2}, {'ip_address': IP_ADDRESS3}],
+                'fixed_ips': [{
+                    'subnet_id': SUBNET1_ID, 'ip_address': IP_ADDRESS1}],
+                'name': FAKE_NAME, 'qos_policy_id': QOS_POLICY_ID,
+                'security_groups': [SECURITY_GROUP_ID]}})
+
+        # Test minimal successful path
+        result = self.driver.create_port(NETWORK_ID)
+
+        self.assertEqual(reference_port_dict, result.to_dict())
+
+        # Test exception
+        self.assertRaises(network_base.CreatePortException,
+                          self.driver.create_port, NETWORK_ID, name=FAKE_NAME,
+                          fixed_ips=FIXED_IPS, secondary_ips=SECONDARY_IPS,
+                          security_group_ids=[SECURITY_GROUP_ID],
+                          admin_state_up=False, qos_policy_id=QOS_POLICY_ID)
+
+    def test_get_security_group(self):
+
+        # Test the case of security groups disabled in neutron
+        FAKE_SG_NAME = 'Fake_SG_name'
+        FAKE_NEUTRON_SECURITY_GROUPS = {'security_groups': [
+            t_constants.MOCK_SECURITY_GROUP]}
+        reference_sg_dict = {'id': t_constants.MOCK_SECURITY_GROUP_ID,
+                             'name': t_constants.MOCK_SECURITY_GROUP_NAME,
+                             'description': '', 'tags': [],
+                             'security_group_rule_ids': [],
+                             'stateful': None,
+                             'project_id': t_constants.MOCK_PROJECT_ID}
+
+        self.driver.neutron_client.list_security_groups.side_effect = [
+            FAKE_NEUTRON_SECURITY_GROUPS, None, Exception('boom')]
+
+        self.driver.sec_grp_enabled = False
+        result = self.driver.get_security_group(FAKE_SG_NAME)
+
+        self.assertIsNone(result)
+        self.driver.neutron_client.list_security_groups.assert_not_called()
+
+        # Test successful get of the security group
+        self.driver.sec_grp_enabled = True
+
+        result = self.driver.get_security_group(FAKE_SG_NAME)
+
+        self.assertEqual(reference_sg_dict, result.to_dict())
+        self.driver.neutron_client.list_security_groups.called_once_with(
+            name=FAKE_SG_NAME)
+
+        # Test no security groups returned
+        self.assertRaises(network_base.SecurityGroupNotFound,
+                          self.driver.get_security_group, FAKE_SG_NAME)
+
+        # Test with an unknown exception
+        self.assertRaises(network_base.NetworkException,
+                          self.driver.get_security_group, FAKE_SG_NAME)
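The new test_delete_port and test_set_port_admin_state_up cases fix the error-handling contract for these helpers: a port that is already gone counts as a successful delete, while any other Neutron failure is wrapped in NetworkServiceError. A minimal, self-contained sketch of that contract (the exception classes here are stand-ins for the neutronclient and octavia.common.exceptions types):

class NeutronNotFound(Exception):
    """Stand-in for the neutronclient NotFound exception."""


class NetworkServiceError(Exception):
    """Stand-in for octavia.common.exceptions.NetworkServiceError."""


def delete_port(neutron_client, port_id):
    """Delete a port; a missing port is success, anything else is fatal."""
    try:
        neutron_client.delete_port(port_id)
    except NeutronNotFound:
        pass  # the port is already gone, which is the desired end state
    except Exception as e:
        raise NetworkServiceError(str(e))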
@@ -66,6 +66,7 @@ class TestNeutronUtils(base.TestCase):
            project_id=t_constants.MOCK_PROJECT_ID,
            admin_state_up=t_constants.MOCK_ADMIN_STATE_UP,
            fixed_ips=[],
+            security_group_ids=[],
        )
        self._compare_ignore_value_none(model_obj.to_dict(), assert_dict)
        fixed_ips = t_constants.MOCK_NEUTRON_PORT['port']['fixed_ips']
@@ -16,6 +16,7 @@ from unittest import mock

from oslo_utils import uuidutils

from octavia.db import models
+from octavia.network import data_models as network_models
from octavia.network.drivers.noop_driver import driver
import octavia.tests.unit.base as base

@@ -186,6 +187,14 @@ class TestNoopNetworkDriver(base.TestCase):
                           self.device_id)]
        )
+
+    def test_get_security_group(self):
+        FAKE_SG_NAME = 'fake_sg_name'
+        result = self.driver.get_security_group(FAKE_SG_NAME)
+
+        self.assertEqual((FAKE_SG_NAME, 'get_security_group'),
+                         self.driver.driver.networkconfigconfig[FAKE_SG_NAME])
+        self.assertTrue(uuidutils.is_uuid_like(result.id))
+
    def test_plug_port(self):
        self.driver.plug_port(self.amphora1, self.port)
        self.assertEqual(
@@ -237,3 +246,50 @@ class TestNoopNetworkDriver(base.TestCase):
            self.driver.driver.networkconfigconfig[self.amphora1.id,
                                                   self.vip.ip_address]
        )
+
+    def test_delete_port(self):
+        PORT_ID = uuidutils.generate_uuid()
+
+        self.driver.delete_port(PORT_ID)
+
+        self.assertEqual((PORT_ID, 'delete_port'),
+                         self.driver.driver.networkconfigconfig[PORT_ID])
+
+    def test_set_port_admin_state_up(self):
+        PORT_ID = uuidutils.generate_uuid()
+
+        self.driver.set_port_admin_state_up(PORT_ID, False)
+
+        self.assertEqual(
+            (PORT_ID, False, 'admin_down_port'),
+            self.driver.driver.networkconfigconfig[(PORT_ID, False)])
+
+    def test_create_port(self):
+        FAKE_NAME = 'fake_name'
+        IP_ADDRESS = '2001:db8::77'
+        NETWORK_ID = uuidutils.generate_uuid()
+        QOS_POLICY_ID = uuidutils.generate_uuid()
+        SUBNET_ID = uuidutils.generate_uuid()
+        FIXED_IPS = [{'ip_address': IP_ADDRESS, 'subnet_id': SUBNET_ID},
+                     {'subnet_id': SUBNET_ID}]
+
+        # Test minimum
+        result = self.driver.create_port(NETWORK_ID)
+
+        self.assertIsInstance(result, network_models.Port)
+        self.assertEqual(NETWORK_ID, result.network_id)
+
+        # Test full parameters
+        result = self.driver.create_port(
+            NETWORK_ID, name=FAKE_NAME, fixed_ips=FIXED_IPS,
+            admin_state_up=False, qos_policy_id=QOS_POLICY_ID)
+
+        self.assertIsInstance(result, network_models.Port)
+        self.assertEqual(NETWORK_ID, result.network_id)
+        self.assertEqual(FAKE_NAME, result.name)
+        self.assertEqual(IP_ADDRESS, result.fixed_ips[0].ip_address)
+        self.assertEqual(SUBNET_ID, result.fixed_ips[0].subnet_id)
+        self.assertEqual('198.51.100.56', result.fixed_ips[1].ip_address)
+        self.assertEqual(SUBNET_ID, result.fixed_ips[1].subnet_id)
+        self.assertEqual(QOS_POLICY_ID, result.qos_policy_id)
+        self.assertFalse(result.admin_state_up)
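All of the new no-op driver tests rely on the same bookkeeping pattern: each method records its arguments in the networkconfigconfig dict, keyed by those same arguments, so tests can assert a call happened without touching Neutron. A minimal sketch of the pattern (the class name is illustrative; the real methods live in the noop_driver module):

class NoopRecorder:
    """Records each call the way the no-op network driver does."""

    def __init__(self):
        self.networkconfigconfig = {}

    def delete_port(self, port_id):
        self.networkconfigconfig[port_id] = (port_id, 'delete_port')

    def set_port_admin_state_up(self, port_id, state):
        self.networkconfigconfig[(port_id, state)] = (
            port_id, state, 'admin_down_port')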
@@ -0,0 +1,11 @@
+---
+upgrade:
+  - |
+    The failover improvements do not require an updated amphora image,
+    but updating existing amphorae will minimize the failover
+    outage time for standalone amphorae on subsequent failovers.
+fixes:
+  - |
+    Significantly improved the reliability and performance of amphora
+    and load balancer failovers. This is especially true when the
+    Nova service is experiencing failures.
@@ -52,13 +52,12 @@ def generate(flow_list, output_directory):
        current_instance = current_class()
        get_flow_method = getattr(current_instance, current_tuple[2])
        if (current_tuple[1] == 'AmphoraFlows' and
-                current_tuple[2] == 'get_failover_flow'):
+                current_tuple[2] == 'get_failover_amphora_flow'):
            amp1 = dmh.generate_amphora()
            amp2 = dmh.generate_amphora()
            lb = dmh.generate_load_balancer(amphorae=[amp1, amp2])
            current_engine = engines.load(
-                get_flow_method(role=constants.ROLE_STANDALONE,
-                                load_balancer=lb))
+                get_flow_method(amp1, 2))
        elif (current_tuple[1] == 'LoadBalancerFlows' and
                current_tuple[2] == 'get_create_load_balancer_flow'):
            current_engine = engines.load(
@@ -74,6 +73,15 @@ def generate(flow_list, output_directory):
            lb = dmh.generate_load_balancer()
            delete_flow, store = get_flow_method(lb)
            current_engine = engines.load(delete_flow)
+        elif (current_tuple[1] == 'LoadBalancerFlows' and
+                current_tuple[2] == 'get_failover_LB_flow'):
+            amp1 = dmh.generate_amphora()
+            amp2 = dmh.generate_amphora()
+            lb = dmh.generate_load_balancer(
+                amphorae=[amp1, amp2],
+                topology=constants.TOPOLOGY_ACTIVE_STANDBY)
+            current_engine = engines.load(
+                get_flow_method([amp1, amp2], lb))
        elif (current_tuple[1] == 'MemberFlows' and
                current_tuple[2] == 'get_batch_update_members_flow'):
            current_engine = engines.load(
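The new branch implies the signature get_failover_LB_flow(amphorae, load_balancer) on LoadBalancerFlows. A minimal usage sketch mirroring what the generator does (this assumes an Octavia development environment and the same helper imports this tool uses; engine.run() would actually execute the flow):

from taskflow import engines

from octavia.common import constants
from octavia.controller.worker.v1.flows import load_balancer_flows
from octavia.tests.common import data_model_helpers as dmh

amp1 = dmh.generate_amphora()
amp2 = dmh.generate_amphora()
lb = dmh.generate_load_balancer(
    amphorae=[amp1, amp2],
    topology=constants.TOPOLOGY_ACTIVE_STANDBY)

# Build the failover flow and load it into a taskflow engine, exactly
# as the documentation generator above does.
flow = load_balancer_flows.LoadBalancerFlows().get_failover_LB_flow(
    [amp1, amp2], lb)
engine = engines.load(flow)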
@@ -3,12 +3,13 @@
# Format:
# module class flow
octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows get_create_amphora_flow
-octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows get_failover_flow
+octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows get_failover_amphora_flow
octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows cert_rotate_amphora_flow
octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_create_load_balancer_flow
octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_delete_load_balancer_flow
octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_cascade_delete_load_balancer_flow
octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_update_load_balancer_flow
+octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_failover_LB_flow
octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_create_listener_flow
octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_create_all_listeners_flow
octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_delete_listener_flow