Add UDP test scenario

Update the test_healthmonitor_traffic and test_basic_traffic tests
to support UDP traffic in test_traffic_ops.
Add a simple UDP test in test_ipv6_traffic_ops.

Add a UDP test server, merged with the existing HTTP test server.

Change-Id: I1e497b75672753ed0e7acf482bc0e4a6138d3437
Author: Gregory Thiemonge 2019-04-30 15:06:17 +02:00
parent 2c59777910
commit 29d179016a
12 changed files with 558 additions and 121 deletions

View File

@@ -138,6 +138,7 @@ HEALTH_MONITOR_TCP = 'TCP'
 HEALTH_MONITOR_HTTP = 'HTTP'
 HEALTH_MONITOR_HTTPS = 'HTTPS'
 HEALTH_MONITOR_TLS_HELLO = 'TLS-HELLO'
+HEALTH_MONITOR_UDP_CONNECT = 'UDP-CONNECT'

 # Session Persistence
 TYPE = 'type'
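
The UDP-CONNECT type added here is exercised by _test_healthmonitor_traffic further down in this commit. As a minimal sketch of where the constant ends up, this is the shape of the create-healthmonitor request body for a UDP pool; the field names assume the Octavia v2 health monitor API and the pool UUID is a hypothetical placeholder:

```python
# Sketch: create-healthmonitor request body for a UDP pool, mirroring the
# hm_kwargs built in _test_healthmonitor_traffic below. Field names assume
# the Octavia v2 API; the pool UUID is a hypothetical placeholder.
udp_healthmonitor = {
    'pool_id': '8a9f18e8-1234-5678-9abc-def012345678',
    'name': 'lb_member_hm1-hm-traffic',
    'type': 'UDP-CONNECT',
    'delay': 3,
    'timeout': 2,
    'max_retries': 2,
    'max_retries_down': 2,
    'admin_state_up': True,
}
```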

View File

@@ -1,6 +0,0 @@
-To build a statically linked binary for httpd on Ubuntu (can run anywhere):
-
-```sh
-sudo apt-get install -y golang
-go build -ldflags "-s -w -linkmode external -extldflags -static" -o httpd.bin httpd.go
-```

View File

@@ -0,0 +1,26 @@
+====================
+Amphorae test server
+====================
+
+test_server is a statically linked application that simulates an HTTP and a
+UDP server.
+
+Building
+--------
+
+To build a statically linked binary for test_server (can run anywhere):
+
+Install dependencies for Ubuntu/Debian:
+
+    sudo apt-get install -y golang
+
+Install dependencies for CentOS (use golang 1.10 from go-toolset-7) and launch
+a shell into the new environment:
+
+    sudo yum install -y centos-release-scl
+    sudo yum install -y go-toolset-7-golang-bin glibc-static openssl-static zlib-static
+    scl enable go-toolset-7 bash
+
+Build the binary:
+
+    CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-s -w -extldflags -static' -o test_server.bin test_server.go
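
Once built, the binary answers both protocols on the same port number: HTTP over TCP and a one-datagram echo of the server ID over UDP (see the test_server.go hunks below). A quick sanity check of a running instance, as a sketch assuming `./test_server.bin -id 1 -port 8080` was started locally:

```python
# Sketch: probe a locally running test_server.bin
# (assumes: ./test_server.bin -id 1 -port 8080).
import socket
import urllib.request

# HTTP side: the root handler replies with the server ID.
body = urllib.request.urlopen('http://127.0.0.1:8080/', timeout=2).read()
print(body.decode())  # expected: "1"

# UDP side: any datagram is answered with the same server ID.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(2)
sock.sendto(b'data\n', ('127.0.0.1', 8080))
data, _ = sock.recvfrom(4096)
sock.close()
print(data.decode())  # expected: "1"
```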

Binary file not shown.

View File

@@ -4,6 +4,7 @@ import (
     "flag"
     "fmt"
     "io"
+    "net"
     "net/http"
     "sync"
     "time"
@@ -88,20 +89,51 @@ func reset_handler(w http.ResponseWriter, r *http.Request) {
     fmt.Fprintf(w, "reset\n")
 }

-func main() {
-    portPtr := flag.Int("port", 8080, "TCP port to listen on")
-    idPtr := flag.String("id", "1", "Server ID")
-    flag.Parse()
-
-    resp = fmt.Sprintf("%s", *idPtr)
+func http_serve(port int, id string) {
     sess_cookie.Name = "JSESSIONID"
-    sess_cookie.Value = *idPtr
+    sess_cookie.Value = id

     http.HandleFunc("/", root_handler)
     http.HandleFunc("/slow", slow_handler)
     http.HandleFunc("/stats", stats_handler)
     http.HandleFunc("/reset", reset_handler)
-    portStr := fmt.Sprintf(":%d", *portPtr)
+    portStr := fmt.Sprintf(":%d", port)
     http.ListenAndServe(portStr, nil)
 }
+
+func udp_serve(port int, id string) {
+    portStr := fmt.Sprintf("0.0.0.0:%d", port)
+    pc, err := net.ListenPacket("udp", portStr)
+    if err != nil {
+        fmt.Println(err)
+        return
+    }
+
+    buffer := make([]byte, 1500)
+
+    for {
+        _, addr, err := pc.ReadFrom(buffer)
+        if err != nil {
+            fmt.Println(err)
+            return
+        }
+
+        _, err = pc.WriteTo([]byte(resp), addr)
+        if err != nil {
+            fmt.Println(err)
+            return
+        }
+    }
+}
+
+func main() {
+    portPtr := flag.Int("port", 8080, "Port to listen on")
+    idPtr := flag.String("id", "1", "Server ID")
+    flag.Parse()
+
+    resp = fmt.Sprintf("%s", *idPtr)
+
+    go http_serve(*portPtr, *idPtr)
+    udp_serve(*portPtr, *idPtr)
+}

View File

@@ -63,6 +63,28 @@ class IPv6TrafficOperationsScenarioTest(
             cls.lb_vip_address = lb[const.VIP_ADDRESS]

+        # Per protocol listeners and pools IDs
+        cls.listener_ids = {}
+        cls.pool_ids = {}
+
+        cls.protocol = const.HTTP
+        lb_feature_enabled = CONF.loadbalancer_feature_enabled
+        if not lb_feature_enabled.l7_protocol_enabled:
+            cls.protocol = lb_feature_enabled.l4_protocol
+
+        # Don't use same ports for HTTP/l4_protocol and UDP because some
+        # releases (<=train) don't support it
+        cls._listener_pool_create(const.HTTP, 80)
+
+        cls._listener_pool_create(const.UDP, 8080)
+
+    @classmethod
+    def _listener_pool_create(cls, protocol, protocol_port):
+        if (protocol == const.UDP and
+                not cls.mem_listener_client.is_version_supported(
+                    cls.api_version, '2.1')):
+            return
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -72,15 +94,19 @@ class IPv6TrafficOperationsScenarioTest(
         listener_name = data_utils.rand_name("lb_member_listener1_ipv6_ops")
         listener_kwargs = {
             const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: '80',
+            const.PROTOCOL: protocol,
+            const.PROTOCOL_PORT: protocol_port,
             const.LOADBALANCER_ID: cls.lb_id,
+            # For branches that don't support multiple listeners in single
+            # haproxy process and use haproxy>=1.8:
+            const.CONNECTION_LIMIT: 200,
         }
-        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.listener_id = listener[const.ID]
+        listener = cls.mem_listener_client.create_listener(
+            **listener_kwargs)
+        cls.listener_ids[protocol] = listener[const.ID]

         cls.addClassResourceCleanup(
             cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
+            cls.listener_ids[protocol],
             lb_client=cls.mem_lb_client, lb_id=cls.lb_id)

         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
@@ -92,15 +118,15 @@ class IPv6TrafficOperationsScenarioTest(
         pool_name = data_utils.rand_name("lb_member_pool1_ipv6_ops")
         pool_kwargs = {
             const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL: protocol,
             const.LB_ALGORITHM: cls.lb_algorithm,
-            const.LISTENER_ID: cls.listener_id,
+            const.LISTENER_ID: cls.listener_ids[protocol],
         }
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
-        cls.pool_id = pool[const.ID]
+        cls.pool_ids[protocol] = pool[const.ID]

         cls.addClassResourceCleanup(
             cls.mem_pool_client.cleanup_pool,
-            cls.pool_id,
+            cls.pool_ids[protocol],
             lb_client=cls.mem_lb_client, lb_id=cls.lb_id)

         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
@@ -109,8 +135,8 @@ class IPv6TrafficOperationsScenarioTest(
                                 CONF.load_balancer.build_interval,
                                 CONF.load_balancer.build_timeout)

-    @decorators.idempotent_id('219ac17d-c5c1-4e7e-a9d5-0764d7ce7746')
-    def test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(self):
+    def _test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(self, protocol,
+                                                       protocol_port):
         """Tests traffic through a loadbalancer with IPv4 and IPv6 members.

         * Set up members on a loadbalancer.
@@ -119,7 +145,7 @@ class IPv6TrafficOperationsScenarioTest(
         # Set up Member 1 for Webserver 1
         member1_name = data_utils.rand_name("lb_member_member1-traffic")
         member1_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member1_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver1_ip,

@@ -132,7 +158,7 @@ class IPv6TrafficOperationsScenarioTest(
             **member1_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member1[const.ID], pool_id=self.pool_id,
+            member1[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)

         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,

@@ -143,7 +169,7 @@ class IPv6TrafficOperationsScenarioTest(
         # Set up Member 2 for Webserver 2
         member2_name = data_utils.rand_name("lb_member_member2-traffic")
         member2_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member2_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver2_ipv6,

@@ -157,7 +183,7 @@ class IPv6TrafficOperationsScenarioTest(
             **member2_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member2[const.ID], pool_id=self.pool_id,
+            member2[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)

         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -166,4 +192,101 @@ class IPv6TrafficOperationsScenarioTest(
             CONF.load_balancer.check_timeout)

         # Send some traffic
-        self.check_members_balanced(self.lb_vip_address)
+        self.check_members_balanced(self.lb_vip_address,
+                                    protocol_port=protocol_port,
+                                    protocol=protocol)
+
+    @decorators.idempotent_id('219ac17d-c5c1-4e7e-a9d5-0764d7ce7746')
+    def test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(self):
+        self._test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(self.protocol, 80)
+
+    @decorators.idempotent_id('c468434d-bc84-4bfa-825f-d4761daa0d76')
+    # Skipping test for amphora driver until "UDP load balancers cannot mix
+    # protocol versions" (https://storyboard.openstack.org/#!/story/2003329) is
+    # fixed
+    @decorators.skip_because(
+        bug='2003329',
+        bug_type='storyboard',
+        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
+    def test_ipv6_vip_mixed_ipv4_ipv6_members_udp_traffic(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('UDP listener support is only available '
+                                     'in Octavia API version 2.1 or newer')
+
+        self._test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(const.UDP, 8080)
+
+    def _test_ipv6_vip_ipv6_members_traffic(self, protocol, protocol_port):
+        """Tests traffic through a loadbalancer with IPv6 members.
+
+        * Set up members on a loadbalancer.
+        * Test traffic to ensure it is balanced properly.
+        """
+        # Set up Member 1 for Webserver 1
+        member1_name = data_utils.rand_name("lb_member_member1-traffic")
+        member1_kwargs = {
+            const.POOL_ID: self.pool_ids[protocol],
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver1_ipv6,
+            const.PROTOCOL_PORT: 80,
+        }
+        if self.lb_member_1_ipv6_subnet:
+            member1_kwargs[const.SUBNET_ID] = (
+                self.lb_member_1_ipv6_subnet[const.ID])
+
+        member1 = self.mem_member_client.create_member(
+            **member1_kwargs)
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=self.pool_ids[protocol],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 2 for Webserver 2
+        member2_name = data_utils.rand_name("lb_member_member2-traffic")
+        member2_kwargs = {
+            const.POOL_ID: self.pool_ids[protocol],
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver2_ipv6,
+            const.PROTOCOL_PORT: 80,
+        }
+        if self.lb_member_2_ipv6_subnet:
+            member2_kwargs[const.SUBNET_ID] = (
+                self.lb_member_2_ipv6_subnet[const.ID])
+
+        member2 = self.mem_member_client.create_member(
+            **member2_kwargs)
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=self.pool_ids[protocol],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address,
+                                    protocol_port=protocol_port,
+                                    protocol=protocol)
+
+    @decorators.idempotent_id('dd75f41a-5b29-47ad-963d-3434f1056ca3')
+    def test_ipv6_vip_ipv6_members_traffic(self):
+        self._test_ipv6_vip_ipv6_members_traffic(self.protocol, 80)
+
+    @decorators.idempotent_id('26317013-a9b5-4a00-a993-d4c55b764e40')
+    def test_ipv6_vip_ipv6_members_udp_traffic(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('UDP listener support is only available '
+                                     'in Octavia API version 2.1 or newer')
+
+        self._test_ipv6_vip_ipv6_members_traffic(const.UDP, 8080)

View File

@@ -322,6 +322,7 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
     # fixed
     @decorators.skip_because(
         bug='2003329',
+        bug_type='storyboard',
         condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_mixed_udp_member_create(self):
         """Test the member creation with mixed IP protocol members/VIP."""

View File

@@ -73,23 +73,44 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
         else:
             cls.lb_vip_address = lb[const.VIP_ADDRESS]

-        protocol = const.HTTP
+        # Per protocol listeners and pools IDs
+        cls.listener_ids = {}
+        cls.pool_ids = {}
+
+        cls.protocol = const.HTTP
         lb_feature_enabled = CONF.loadbalancer_feature_enabled
         if not lb_feature_enabled.l7_protocol_enabled:
-            protocol = lb_feature_enabled.l4_protocol
+            cls.protocol = lb_feature_enabled.l4_protocol
+
+        # Don't use same ports for HTTP/l4_protocol and UDP because some
+        # releases (<=train) don't support it
+        cls._listener_pool_create(cls.protocol, 80)
+
+        cls._listener_pool_create(const.UDP, 8080)
+
+    @classmethod
+    def _listener_pool_create(cls, protocol, protocol_port):
+        if (protocol == const.UDP and
+                not cls.mem_listener_client.is_version_supported(
+                    cls.api_version, '2.1')):
+            return

         listener_name = data_utils.rand_name("lb_member_listener1_operations")
         listener_kwargs = {
             const.NAME: listener_name,
             const.PROTOCOL: protocol,
-            const.PROTOCOL_PORT: '80',
+            const.PROTOCOL_PORT: protocol_port,
             const.LOADBALANCER_ID: cls.lb_id,
+            # For branches that don't support multiple listeners in single
+            # haproxy process and use haproxy>=1.8:
+            const.CONNECTION_LIMIT: 200,
         }
-        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.listener_id = listener[const.ID]
+        listener = cls.mem_listener_client.create_listener(
+            **listener_kwargs)
+        cls.listener_ids[protocol] = listener[const.ID]

         cls.addClassResourceCleanup(
             cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
+            cls.listener_ids[protocol],
             lb_client=cls.mem_lb_client, lb_id=cls.lb_id)

         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
@@ -103,13 +124,13 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
             const.NAME: pool_name,
             const.PROTOCOL: protocol,
             const.LB_ALGORITHM: cls.lb_algorithm,
-            const.LISTENER_ID: cls.listener_id,
+            const.LISTENER_ID: cls.listener_ids[protocol],
         }
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
-        cls.pool_id = pool[const.ID]
+        cls.pool_ids[protocol] = pool[const.ID]

         cls.addClassResourceCleanup(
             cls.mem_pool_client.cleanup_pool,
-            cls.pool_id,
+            cls.pool_ids[protocol],
             lb_client=cls.mem_lb_client, lb_id=cls.lb_id)

         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
@@ -118,10 +139,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
                                 CONF.load_balancer.build_interval,
                                 CONF.load_balancer.build_timeout)

-    @testtools.skipIf(CONF.load_balancer.test_with_noop,
-                      'Traffic tests will not work in noop mode.')
-    @decorators.idempotent_id('6751135d-e15a-4e22-89f4-bfcc3408d424')
-    def test_basic_traffic(self):
+    def _test_basic_traffic(self, protocol, protocol_port):
         """Tests sending traffic through a loadbalancer

         * Set up members on a loadbalancer.
@@ -130,7 +148,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
         # Set up Member 1 for Webserver 1
         member1_name = data_utils.rand_name("lb_member_member1-traffic")
         member1_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member1_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver1_ip,

@@ -143,7 +161,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
             **member1_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member1[const.ID], pool_id=self.pool_id,
+            member1[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)

         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,

@@ -154,7 +172,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
         # Set up Member 2 for Webserver 2
         member2_name = data_utils.rand_name("lb_member_member2-traffic")
         member2_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member2_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver2_ip,

@@ -167,7 +185,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
             **member2_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member2[const.ID], pool_id=self.pool_id,
+            member2[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)

         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -176,13 +194,28 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
             CONF.load_balancer.check_timeout)

         # Send some traffic
-        self.check_members_balanced(self.lb_vip_address)
+        self.check_members_balanced(self.lb_vip_address,
+                                    protocol_port=protocol_port,
+                                    protocol=protocol)

-    @testtools.skipUnless(
-        CONF.loadbalancer_feature_enabled.health_monitor_enabled,
-        'Health monitor testing is disabled')
-    @decorators.idempotent_id('a16f8eb4-a77c-4b0e-8b1b-91c237039713')
-    def test_healthmonitor_traffic(self):
+    @testtools.skipIf(CONF.load_balancer.test_with_noop,
+                      'Traffic tests will not work in noop mode.')
+    @decorators.idempotent_id('6751135d-e15a-4e22-89f4-bfcc3408d424')
+    def test_basic_traffic(self):
+        self._test_basic_traffic(self.protocol, 80)
+
+    @testtools.skipIf(CONF.load_balancer.test_with_noop,
+                      'Traffic tests will not work in noop mode.')
+    @decorators.idempotent_id('1e459663-2315-4067-bb47-c8a72f4928f0')
+    def test_basic_udp_traffic(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('UDP listener support is only available '
+                                     'in Octavia API version 2.1 or newer')
+
+        self._test_basic_traffic(const.UDP, 8080)
+
+    def _test_healthmonitor_traffic(self, protocol, protocol_port):
         """Tests traffic is correctly routed based on healthmonitor status

         * Create three members:
@@ -198,9 +231,10 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
         * Verify members are in their correct respective operating statuses.
         * Verify that traffic is balanced evenly between the working members.
         """
+
         member1_name = data_utils.rand_name("lb_member_member1-hm-traffic")
         member1_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member1_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver1_ip,

@@ -214,7 +248,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
         member1_id = member1[const.ID]
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member1_id, pool_id=self.pool_id,
+            member1_id, pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)

         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,

@@ -225,7 +259,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
         # Set up Member 2 for Webserver 2
         member2_name = data_utils.rand_name("lb_member_member2-hm-traffic")
         member2_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member2_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver2_ip,

@@ -240,7 +274,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
         member2_id = member2[const.ID]
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member2_id, pool_id=self.pool_id,
+            member2_id, pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)

         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,

@@ -251,7 +285,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
         # Set up Member 3 as a non-existent disabled node
         member3_name = data_utils.rand_name("lb_member_member3-hm-traffic")
         member3_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member3_name,
             const.ADMIN_STATE_UP: False,
             const.ADDRESS: '192.0.2.1',

@@ -263,7 +297,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
         member3_id = member3[const.ID]
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member3_id, pool_id=self.pool_id,
+            member3_id, pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)

         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -278,41 +312,60 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
             const.NO_MONITOR,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])
         waiters.wait_for_status(
             self.mem_member_client.show_member,
             member2_id, const.OPERATING_STATUS,
             const.NO_MONITOR,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])
         waiters.wait_for_status(
             self.mem_member_client.show_member,
             member3_id, const.OPERATING_STATUS,
             const.OFFLINE,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])

         # Send some traffic and verify it is balanced
         self.check_members_balanced(self.lb_vip_address,
+                                    protocol_port=protocol_port,
+                                    protocol=protocol,
                                     traffic_member_count=2)

         # Create the healthmonitor
         hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic")
-        hm_kwargs = {
-            const.POOL_ID: self.pool_id,
-            const.NAME: hm_name,
-            const.TYPE: const.HEALTH_MONITOR_HTTP,
-            const.DELAY: 2,
-            const.TIMEOUT: 2,
-            const.MAX_RETRIES: 2,
-            const.MAX_RETRIES_DOWN: 2,
-            const.HTTP_METHOD: const.GET,
-            const.URL_PATH: '/',
-            const.EXPECTED_CODES: '200',
-            const.ADMIN_STATE_UP: True,
-        }
+        if protocol != const.HTTP:
+            if protocol == const.UDP:
+                hm_type = const.HEALTH_MONITOR_UDP_CONNECT
+            elif protocol == const.TCP:
+                hm_type = const.HEALTH_MONITOR_TCP
+
+            hm_kwargs = {
+                const.POOL_ID: self.pool_ids[protocol],
+                const.NAME: hm_name,
+                const.TYPE: hm_type,
+                const.DELAY: 3,
+                const.TIMEOUT: 2,
+                const.MAX_RETRIES: 2,
+                const.MAX_RETRIES_DOWN: 2,
+                const.ADMIN_STATE_UP: True,
+            }
+        else:
+            hm_kwargs = {
+                const.POOL_ID: self.pool_ids[protocol],
+                const.NAME: hm_name,
+                const.TYPE: const.HEALTH_MONITOR_HTTP,
+                const.DELAY: 2,
+                const.TIMEOUT: 2,
+                const.MAX_RETRIES: 2,
+                const.MAX_RETRIES_DOWN: 2,
+                const.HTTP_METHOD: const.GET,
+                const.URL_PATH: '/',
+                const.EXPECTED_CODES: '200',
+                const.ADMIN_STATE_UP: True,
+            }
         hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
         self.addCleanup(
@@ -339,24 +392,26 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
             error_ok=True,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])
         waiters.wait_for_status(
             self.mem_member_client.show_member,
             member2_id, const.OPERATING_STATUS,
             const.ERROR,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])
         waiters.wait_for_status(
             self.mem_member_client.show_member,
             member3_id, const.OPERATING_STATUS,
             const.OFFLINE,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])

         # Send some traffic and verify it is *unbalanced*, as expected
         self.check_members_balanced(self.lb_vip_address,
+                                    protocol_port=protocol_port,
+                                    protocol=protocol,
                                     traffic_member_count=1)

         # Delete the healthmonitor
@@ -375,24 +430,45 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
             const.NO_MONITOR,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])
         waiters.wait_for_status(
             self.mem_member_client.show_member,
             member2_id, const.OPERATING_STATUS,
             const.NO_MONITOR,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])
         waiters.wait_for_status(
             self.mem_member_client.show_member,
             member3_id, const.OPERATING_STATUS,
             const.OFFLINE,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])

         # Send some traffic and verify it is balanced again
-        self.check_members_balanced(self.lb_vip_address)
+        self.check_members_balanced(self.lb_vip_address,
+                                    protocol_port=protocol_port,
+                                    protocol=protocol)
+
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.health_monitor_enabled,
+        'Health monitor testing is disabled')
+    @decorators.idempotent_id('a16f8eb4-a77c-4b0e-8b1b-91c237039713')
+    def test_healthmonitor_traffic(self):
+        self._test_healthmonitor_traffic(self.protocol, 80)
+
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.health_monitor_enabled,
+        'Health monitor testing is disabled')
+    @decorators.idempotent_id('80b86513-1a76-4e42-91c9-cb23c879e536')
+    def test_healthmonitor_udp_traffic(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('UDP listener support is only available '
+                                     'in Octavia API version 2.1 or newer')
+
+        self._test_healthmonitor_traffic(const.UDP, 8080)

     @testtools.skipUnless(
         CONF.loadbalancer_feature_enabled.l7_protocol_enabled,
@@ -408,11 +484,13 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
         * Create a policy/rule to reject connections.
         * Test traffic to ensure it goes to the correct place.
         """
+        protocol = const.HTTP
+
         # Create a second pool
         pool_name = data_utils.rand_name("lb_member_pool2_l7redirect")
         pool_kwargs = {
             const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL: protocol,
             const.LB_ALGORITHM: self.lb_algorithm,
             const.LOADBALANCER_ID: self.lb_id,
         }
@@ -432,7 +510,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
         # Set up Member 1 for Webserver 1 on the default pool
         member1_name = data_utils.rand_name("lb_member_member1-l7redirect")
         member1_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member1_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver1_ip,

@@ -445,7 +523,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
             **member1_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member1[const.ID], pool_id=self.pool_id,
+            member1[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)

         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,

@@ -469,7 +547,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
             **member2_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member2[const.ID], pool_id=self.pool_id,
+            member2[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)

         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,

@@ -481,7 +559,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
         l7policy1_name = data_utils.rand_name("lb_member_l7policy1-l7redirect")
         l7policy1_description = data_utils.arbitrary_string(size=255)
         l7policy1_kwargs = {
-            const.LISTENER_ID: self.listener_id,
+            const.LISTENER_ID: self.listener_ids[protocol],
             const.NAME: l7policy1_name,
             const.DESCRIPTION: l7policy1_description,
             const.ADMIN_STATE_UP: True,

@@ -526,7 +604,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
         l7policy2_name = data_utils.rand_name("lb_member_l7policy2-l7redirect")
         l7policy2_description = data_utils.arbitrary_string(size=255)
         l7policy2_kwargs = {
-            const.LISTENER_ID: self.listener_id,
+            const.LISTENER_ID: self.listener_ids[protocol],
             const.NAME: l7policy2_name,
             const.DESCRIPTION: l7policy2_description,
             const.ADMIN_STATE_UP: True,

@@ -571,7 +649,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
         l7policy3_name = data_utils.rand_name("lb_member_l7policy3-l7redirect")
         l7policy3_description = data_utils.arbitrary_string(size=255)
         l7policy3_kwargs = {
-            const.LISTENER_ID: self.listener_id,
+            const.LISTENER_ID: self.listener_ids[protocol],
             const.NAME: l7policy3_name,
             const.DESCRIPTION: l7policy3_description,
             const.ADMIN_STATE_UP: True,
@@ -633,21 +711,17 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
             url_for_member1,
             headers={'reject': 'true'})

-    @testtools.skipIf(CONF.load_balancer.test_with_noop,
-                      'Traffic tests will not work in noop mode.')
-    @testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
-                          'Mixed IPv4/IPv6 member test requires IPv6.')
-    @decorators.idempotent_id('20b6b671-0101-4bed-a249-9af6ee3aa6d9')
-    def test_mixed_ipv4_ipv6_members_traffic(self):
+    def _test_mixed_ipv4_ipv6_members_traffic(self, protocol, protocol_port):
         """Tests traffic through a loadbalancer with IPv4 and IPv6 members.

         * Set up members on a loadbalancer.
         * Test traffic to ensure it is balanced properly.
         """
         # Set up Member 1 for Webserver 1
         member1_name = data_utils.rand_name("lb_member_member1-traffic")
         member1_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member1_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver1_ip,
@@ -660,7 +734,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
             **member1_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member1[const.ID], pool_id=self.pool_id,
+            member1[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)

         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,

@@ -671,7 +745,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
         # Set up Member 2 for Webserver 2
         member2_name = data_utils.rand_name("lb_member_member2-traffic")
         member2_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member2_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver2_ipv6,

@@ -685,7 +759,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
             **member2_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member2[const.ID], pool_id=self.pool_id,
+            member2[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)

         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -694,4 +768,34 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
             CONF.load_balancer.check_timeout)

         # Send some traffic
-        self.check_members_balanced(self.lb_vip_address)
+        self.check_members_balanced(self.lb_vip_address,
+                                    protocol_port=protocol_port,
+                                    protocol=protocol)
+
+    @testtools.skipIf(CONF.load_balancer.test_with_noop,
+                      'Traffic tests will not work in noop mode.')
+    @testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
+                          'Mixed IPv4/IPv6 member test requires IPv6.')
+    @decorators.idempotent_id('20b6b671-0101-4bed-a249-9af6ee3aa6d9')
+    def test_mixed_ipv4_ipv6_members_traffic(self):
+        self._test_mixed_ipv4_ipv6_members_traffic(self.protocol, 80)
+
+    @testtools.skipIf(CONF.load_balancer.test_with_noop,
+                      'Traffic tests will not work in noop mode.')
+    @testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
+                          'Mixed IPv4/IPv6 member test requires IPv6.')
+    @decorators.idempotent_id('56823616-34e1-4e17-beb9-15dd6b1593af')
+    # Skipping test for amphora driver until "UDP load balancers cannot mix
+    # protocol versions" (https://storyboard.openstack.org/#!/story/2003329) is
+    # fixed
+    @decorators.skip_because(
+        bug='2003329',
+        bug_type='storyboard',
+        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
+    def test_mixed_ipv4_ipv6_members_udp_traffic(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('UDP listener support is only available '
+                                     'in Octavia API version 2.1 or newer')
+
+        self._test_mixed_ipv4_ipv6_members_traffic(const.UDP, 8080)

View File

@@ -12,12 +12,14 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import errno
 import ipaddress
 import pkg_resources
 import random
 import requests
 import shlex
 import six
+import socket
 import string
 import subprocess
 import tempfile
@@ -45,6 +47,9 @@ RETRY_INITIAL_DELAY = 1
 RETRY_BACKOFF = 1
 RETRY_MAX = 5

+SRC_PORT_NUMBER_MIN = 32768
+SRC_PORT_NUMBER_MAX = 61000
+

 class LoadBalancerBaseTest(test.BaseTestCase):
     """Base class for load balancer tests."""
@@ -61,6 +66,8 @@ class LoadBalancerBaseTest(test.BaseTestCase):
     webserver2_response = 5
     used_ips = []

+    src_port_number = SRC_PORT_NUMBER_MIN
+
     @classmethod
     def skip_checks(cls):
         """Check if we should skip all of the children tests."""
@@ -548,6 +555,34 @@ class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
                 cls.lb_mem_SGr_client.delete_security_group_rule,
                 cls.lb_mem_SGr_client.show_security_group_rule,
                 SGr['id'])
+            # Create a security group rule to allow UDP 80-81 (test webservers)
+            SGr = cls.lb_mem_SGr_client.create_security_group_rule(
+                direction='ingress',
+                security_group_id=cls.lb_member_sec_group['id'],
+                protocol='udp',
+                ethertype='IPv4',
+                port_range_min=80,
+                port_range_max=81)['security_group_rule']
+            cls.addClassResourceCleanup(
+                waiters.wait_for_not_found,
+                cls.lb_mem_SGr_client.delete_security_group_rule,
+                cls.lb_mem_SGr_client.show_security_group_rule,
+                SGr['id'])
+            # Create a security group rule to allow UDP 9999 (test webservers)
+            # Port 9999 is used to illustrate health monitor ERRORs on closed
+            # ports.
+            SGr = cls.lb_mem_SGr_client.create_security_group_rule(
+                direction='ingress',
+                security_group_id=cls.lb_member_sec_group['id'],
+                protocol='udp',
+                ethertype='IPv4',
+                port_range_min=9999,
+                port_range_max=9999)['security_group_rule']
+            cls.addClassResourceCleanup(
+                waiters.wait_for_not_found,
+                cls.lb_mem_SGr_client.delete_security_group_rule,
+                cls.lb_mem_SGr_client.show_security_group_rule,
+                SGr['id'])
             # Create a security group rule to allow 22 (ssh)
             SGr = cls.lb_mem_SGr_client.create_security_group_rule(
                 direction='ingress',
@@ -575,6 +610,20 @@ class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
                     cls.lb_mem_SGr_client.delete_security_group_rule,
                     cls.lb_mem_SGr_client.show_security_group_rule,
                     SGr['id'])
+                # Create a security group rule to allow UDP 80-81 (test
+                # webservers)
+                SGr = cls.lb_mem_SGr_client.create_security_group_rule(
+                    direction='ingress',
+                    security_group_id=cls.lb_member_sec_group['id'],
+                    protocol='udp',
+                    ethertype='IPv6',
+                    port_range_min=80,
+                    port_range_max=81)['security_group_rule']
+                cls.addClassResourceCleanup(
+                    waiters.wait_for_not_found,
+                    cls.lb_mem_SGr_client.delete_security_group_rule,
+                    cls.lb_mem_SGr_client.show_security_group_rule,
+                    SGr['id'])
                 # Create a security group rule to allow 22 (ssh)
                 SGr = cls.lb_mem_SGr_client.create_security_group_rule(
                     direction='ingress',
@@ -647,6 +696,10 @@ class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
             cls._validate_webserver(cls.webserver1_public_ip,
                                     cls.webserver1_response)

+            # Validate udp server 1
+            cls._validate_udp_server(cls.webserver1_public_ip,
+                                     cls.webserver1_response)
+
         # Set up serving on webserver 2
         cls._install_start_webserver(cls.webserver2_public_ip,
                                      cls.lb_member_keypair['private_key'],
@@ -656,6 +709,10 @@ class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
             cls._validate_webserver(cls.webserver2_public_ip,
                                     cls.webserver2_response)

+            # Validate udp server 2
+            cls._validate_udp_server(cls.webserver2_public_ip,
+                                     cls.webserver2_response)
+
     @classmethod
     def _create_networks(cls):
         super(LoadBalancerBaseTestWithCompute, cls)._create_networks()
@@ -796,8 +853,8 @@ class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
     @classmethod
     def _install_start_webserver(cls, ip_address, ssh_key, start_id):
         local_file = pkg_resources.resource_filename(
-            'octavia_tempest_plugin.contrib.httpd', 'httpd.bin')
-        dest_file = '/dev/shm/httpd.bin'
+            'octavia_tempest_plugin.contrib.test_server', 'test_server.bin')
+        dest_file = '/dev/shm/test_server.bin'

         linux_client = remote_client.RemoteClient(
             ip_address, CONF.validation.image_ssh_user, pkey=ssh_key)
@@ -859,20 +916,101 @@ class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
         URL = 'http://{0}:81'.format(ip_address)
         validators.validate_URL_response(URL, expected_body=str(start_id + 1))

-    def _wait_for_lb_functional(self, vip_address,
-                                protocol='http', verify=True):
-        session = requests.Session()
+    @classmethod
+    def _validate_udp_server(cls, ip_address, start_id):
+        res = cls._udp_request(ip_address, 80)
+        if res != str(start_id):
+            raise Exception("Response from test server doesn't match the "
+                            "expected value ({0} != {1}).".format(
+                                res, str(start_id)))
+
+        res = cls._udp_request(ip_address, 81)
+        if res != str(start_id + 1):
+            raise Exception("Response from test server doesn't match the "
+                            "expected value ({0} != {1}).".format(
+                                res, str(start_id + 1)))
+
+    @classmethod
+    def _udp_request(cls, vip_address, port=80, timeout=None):
+        if ipaddress.ip_address(vip_address).version == 6:
+            family = socket.AF_INET6
+        else:
+            family = socket.AF_INET
+
+        sock = socket.socket(family, socket.SOCK_DGRAM)
+
+        # Force the use of an incremental port number for source to avoid
+        # re-use of a previous source port that will affect the round-robin
+        # dispatch
+        while True:
+            port_number = cls.src_port_number
+            cls.src_port_number += 1
+            if cls.src_port_number >= SRC_PORT_NUMBER_MAX:
+                cls.src_port_number = SRC_PORT_NUMBER_MIN
+
+            # catch and skip already used ports on the host
+            try:
+                sock.bind(('', port_number))
+            except OSError as e:
+                # if error is 'Address already in use', try next port number
+                if e.errno != errno.EADDRINUSE:
+                    raise e
+            else:
+                # successfully bind the socket
+                break
+
+        server_address = (vip_address, port)
+        data = b"data\n"
+
+        if timeout is not None:
+            sock.settimeout(timeout)
+
+        sock.sendto(data, server_address)
+        data, addr = sock.recvfrom(4096)
+
+        sock.close()
+
+        return data.decode('utf-8')
+
+    def _wait_for_lb_functional(self, vip_address, traffic_member_count,
+                                protocol_port, protocol, verify):
+        if protocol != const.UDP:
+            session = requests.Session()
         start = time.time()

+        response_counts = {}
+
+        # Send requests to the load balancer until at least
+        # "traffic_member_count" members have replied (ensure network
+        # connectivity is functional between the load balancer and the
+        # members)
         while time.time() - start < CONF.load_balancer.build_timeout:
             try:
-                session.get("{0}://{1}".format(protocol, vip_address),
-                            timeout=2, verify=verify)
-                time.sleep(1)
-                return
+                if protocol != const.UDP:
+                    url = "{0}://{1}{2}{3}".format(
+                        protocol.lower(),
+                        vip_address,
+                        ':' if protocol_port else '',
+                        protocol_port or '')
+                    r = session.get(url, timeout=2, verify=verify)
+                    data = r.content
+                else:
+                    data = self._udp_request(vip_address, port=protocol_port,
+                                             timeout=2)
+                if data in response_counts:
+                    response_counts[data] += 1
+                else:
+                    response_counts[data] = 1
+
+                if traffic_member_count == len(response_counts):
+                    LOG.debug('Loadbalancer response totals: %s',
+                              response_counts)
+                    time.sleep(1)
+                    return
             except Exception:
                 LOG.warning('Server is not passing initial traffic. Waiting.')
                 time.sleep(1)
+
+        LOG.debug('Loadbalancer response totals: %s', response_counts)
         LOG.error('Server did not begin passing traffic within the timeout '
                   'period. Failing test.')
         raise Exception()
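
The source-port cycling in _udp_request above matters because the amphora's UDP load balancing keeps connection-like state keyed on the client's source IP and port, so repeated probes from a single source port would keep landing on the same member and mask the round-robin behaviour the tests assert. A standalone sketch of the effect (the VIP address and port numbers are illustrative):

```python
# Sketch: one UDP probe from a pinned source port; returns the ID of the
# member that answered. VIP address and ports below are illustrative.
import socket

def probe(vip, vip_port, src_port):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('', src_port))  # pin the source port explicitly
    sock.settimeout(2)
    sock.sendto(b'data\n', (vip, vip_port))
    data, _ = sock.recvfrom(4096)
    sock.close()
    return data.decode()

# Re-using one source port tends to return the same member every time:
#   probe('203.0.113.10', 8080, 40000) == probe('203.0.113.10', 8080, 40000)
# while distinct source ports let round-robin alternate between members:
#   probe('203.0.113.10', 8080, 40001) != probe('203.0.113.10', 8080, 40002)
```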
@@ -880,16 +1018,27 @@ class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
     def _send_lb_request(self, handler, protocol, vip_address,
                          verify, protocol_port, num=20):
         response_counts = {}

         # Send a number of requests to the lb vip
         for i in range(num):
             try:
-                r = handler.get('{0}://{1}:{2}'.format(protocol, vip_address,
-                                                       protocol_port),
-                                timeout=2, verify=verify)
-                if r.content in response_counts:
-                    response_counts[r.content] += 1
-                else:
-                    response_counts[r.content] = 1
+                if protocol != const.UDP:
+                    url = "{0}://{1}{2}{3}".format(
+                        protocol.lower(),
+                        vip_address,
+                        ':' if protocol_port else '',
+                        protocol_port or '')
+                    r = handler.get(url, timeout=2, verify=verify)
+                    data = r.content
+                else:
+                    data = self._udp_request(vip_address, port=protocol_port,
+                                             timeout=2)
+
+                if data in response_counts:
+                    response_counts[data] += 1
+                else:
+                    response_counts[data] = 1
             except Exception:
                 LOG.exception('Failed to send request to loadbalancer vip')
                 raise Exception('Failed to connect to lb')
@@ -897,7 +1046,7 @@ class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
         return response_counts

     def _check_members_balanced_round_robin(
-            self, vip_address, traffic_member_count=2, protocol='http',
+            self, vip_address, traffic_member_count=2, protocol=const.HTTP,
             verify=True, protocol_port=80):

         handler = requests.Session()
@@ -912,7 +1061,7 @@ class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
         self.assertEqual(1, len(set(response_counts.values())))

     def _check_members_balanced_source_ip_port(
-            self, vip_address, traffic_member_count=2, protocol='http',
+            self, vip_address, traffic_member_count=2, protocol=const.HTTP,
             verify=True, protocol_port=80):

         handler = requests
@@ -931,11 +1080,14 @@ class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
         self.assertEqual(1, len(response_counts))

     def check_members_balanced(self, vip_address, traffic_member_count=2,
-                               protocol='http', verify=True, protocol_port=80):
+                               protocol=const.HTTP, verify=True,
+                               protocol_port=80):
-        if ipaddress.ip_address(vip_address).version == 6:
+        if (ipaddress.ip_address(vip_address).version == 6 and
+                protocol != const.UDP):
             vip_address = '[{}]'.format(vip_address)

-        self._wait_for_lb_functional(vip_address, protocol, verify)
+        self._wait_for_lb_functional(vip_address, traffic_member_count,
+                                     protocol_port, protocol, verify)

         validate_func = '_check_members_balanced_%s' % self.lb_algorithm
         validate_func = getattr(self, validate_func.lower())
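
The final hunk keeps the existing validator dispatch: check_members_balanced builds the method name `_check_members_balanced_<lb_algorithm>` and resolves it with getattr, which is why UDP support only had to touch the shared request helpers. A self-contained sketch of that dispatch pattern:

```python
# Sketch: the getattr dispatch used by check_members_balanced. With
# lb_algorithm = 'ROUND_ROBIN' the lookup resolves to
# _check_members_balanced_round_robin ('SOURCE_IP_PORT' would resolve to
# _check_members_balanced_source_ip_port).
class BalanceChecker:
    lb_algorithm = 'ROUND_ROBIN'

    def _check_members_balanced_round_robin(self, vip_address):
        print('validating round-robin balance for', vip_address)

    def check_members_balanced(self, vip_address):
        validate_func = '_check_members_balanced_%s' % self.lb_algorithm
        getattr(self, validate_func.lower())(vip_address)

BalanceChecker().check_members_balanced('203.0.113.10')
```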

View File

@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Added test scenarios for UDP traffic.