Adjust scenario tests for NotImplemented skip

This is a patch to restructure the scenario tests to use the
new skip_if_not_implemented capability.

Change-Id: I49a7fb6650030f2a1115c6d42442062bd33415fd
This commit is contained in:
Michael Johnson 2020-03-19 15:59:19 -07:00
parent c611b45680
commit 89bdbcd125
16 changed files with 2274 additions and 655 deletions

View File

@ -124,6 +124,7 @@ SUPPORTED_LB_TOPOLOGIES = (SINGLE, ACTIVE_STANDBY)
# Protocols # Protocols
HTTP = 'HTTP' HTTP = 'HTTP'
HTTPS = 'HTTPS' HTTPS = 'HTTPS'
PROXY = 'PROXY'
TCP = 'TCP' TCP = 'TCP'
TERMINATED_HTTPS = 'TERMINATED_HTTPS' TERMINATED_HTTPS = 'TERMINATED_HTTPS'
UDP = 'UDP' UDP = 'UDP'

View File

@ -0,0 +1,32 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.connection import HTTPConnection
from requests.packages.urllib3.poolmanager import PoolManager
class SourcePortAdapter(HTTPAdapter):
    """"Transport adapter" that allows us to set the source port.

    Pins every outgoing connection made through a ``requests`` session to a
    fixed local source port, so tests of source-IP/port based load balancing
    behave deterministically.
    """

    def __init__(self, port, *args, **kwargs):
        """:param port: Local TCP port to bind outgoing connections to."""
        self._source_port = port
        super(SourcePortAdapter, self).__init__(*args, **kwargs)

    def init_poolmanager(self, connections, maxsize, block=False,
                         **pool_kwargs):
        """Build the urllib3 PoolManager with a fixed source port.

        ``**pool_kwargs`` is accepted and forwarded for compatibility with
        the base HTTPAdapter.init_poolmanager() interface (callers may pass
        extra pool options); omitting it would break such callers.
        """
        # Make sure TIME_WAIT doesn't stop us from reusing the socket:
        # SO_REUSEADDR lets us re-bind the same source port between requests.
        sock_options = HTTPConnection.default_socket_options + [
            (socket.SOL_SOCKET, socket.SO_REUSEADDR, 1), ]
        self.poolmanager = PoolManager(
            num_pools=connections, maxsize=maxsize,
            block=block, source_address=('', self._source_port),
            socket_options=sock_options, **pool_kwargs)

View File

@ -41,11 +41,6 @@ class TLSWithBarbicanTest(test_base.LoadBalancerBaseTestWithCompute):
@classmethod @classmethod
def skip_checks(cls): def skip_checks(cls):
super(TLSWithBarbicanTest, cls).skip_checks() super(TLSWithBarbicanTest, cls).skip_checks()
if not CONF.loadbalancer_feature_enabled.l7_protocol_enabled:
raise cls.skipException(
'[loadbalancer_feature_enabled] "l7_protocol_enabled" is '
'False in the tempest configuration. TLS tests will be '
'skipped.')
if not CONF.loadbalancer_feature_enabled.terminated_tls_enabled: if not CONF.loadbalancer_feature_enabled.terminated_tls_enabled:
raise cls.skipException( raise cls.skipException(
'[loadbalancer-feature-enabled] "terminated_tls_enabled" is ' '[loadbalancer-feature-enabled] "terminated_tls_enabled" is '
@ -308,8 +303,8 @@ class TLSWithBarbicanTest(test_base.LoadBalancerBaseTestWithCompute):
# Test HTTPS listener load balancing. # Test HTTPS listener load balancing.
# Note: certificate validation tests will follow this test # Note: certificate validation tests will follow this test
self.check_members_balanced(self.lb_vip_address, protocol='https', self.check_members_balanced(self.lb_vip_address, protocol=const.HTTPS,
verify=False, protocol_port=443) HTTPS_verify=False, protocol_port=443)
def _verify_cb(connection, x509, errno, errdepth, retcode): def _verify_cb(connection, x509, errno, errdepth, retcode):
"""Callback for certificate validation.""" """Callback for certificate validation."""
@ -394,8 +389,8 @@ class TLSWithBarbicanTest(test_base.LoadBalancerBaseTestWithCompute):
# Test HTTPS listener load balancing. # Test HTTPS listener load balancing.
# Note: certificate validation tests will follow this test # Note: certificate validation tests will follow this test
self.check_members_balanced(self.lb_vip_address, protocol='https', self.check_members_balanced(self.lb_vip_address, protocol=const.HTTPS,
verify=False, protocol_port=443) HTTPS_verify=False, protocol_port=443)
# Test HTTP listener load balancing. # Test HTTP listener load balancing.
self.check_members_balanced(self.lb_vip_address) self.check_members_balanced(self.lb_vip_address)
@ -429,8 +424,8 @@ class TLSWithBarbicanTest(test_base.LoadBalancerBaseTestWithCompute):
# Test HTTPS listener load balancing. # Test HTTPS listener load balancing.
# Note: certificate validation tests will follow this test # Note: certificate validation tests will follow this test
self.check_members_balanced(self.lb_vip_address, protocol='https', self.check_members_balanced(self.lb_vip_address, protocol=const.HTTPS,
verify=False, protocol_port=443) HTTPS_verify=False, protocol_port=443)
def _verify_server_cb(connection, x509, errno, errdepth, retcode): def _verify_server_cb(connection, x509, errno, errdepth, retcode):
return _verify_cb(connection, x509, errno, errdepth, retcode, return _verify_cb(connection, x509, errno, errdepth, retcode,
@ -562,8 +557,8 @@ class TLSWithBarbicanTest(test_base.LoadBalancerBaseTestWithCompute):
# Test HTTPS listener load balancing. # Test HTTPS listener load balancing.
# Note: certificate validation tests will follow this test # Note: certificate validation tests will follow this test
self.check_members_balanced(self.lb_vip_address, protocol='https', self.check_members_balanced(self.lb_vip_address, protocol=const.HTTPS,
verify=False, protocol_port=443) HTTPS_verify=False, protocol_port=443)
listener2_name = data_utils.rand_name("lb_member_listener2-tls-sni") listener2_name = data_utils.rand_name("lb_member_listener2-tls-sni")
listener2_kwargs = { listener2_kwargs = {
@ -590,8 +585,8 @@ class TLSWithBarbicanTest(test_base.LoadBalancerBaseTestWithCompute):
# Test HTTPS listener load balancing. # Test HTTPS listener load balancing.
# Note: certificate validation tests will follow this test # Note: certificate validation tests will follow this test
self.check_members_balanced(self.lb_vip_address, protocol='https', self.check_members_balanced(self.lb_vip_address, protocol=const.HTTPS,
verify=False, protocol_port=8443) HTTPS_verify=False, protocol_port=8443)
def _verify_server_cb(connection, x509, errno, errdepth, retcode): def _verify_server_cb(connection, x509, errno, errdepth, retcode):
return _verify_cb(connection, x509, errno, errdepth, retcode, return _verify_cb(connection, x509, errno, errdepth, retcode,

View File

@ -12,12 +12,14 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import testtools
from uuid import UUID from uuid import UUID
from dateutil import parser from dateutil import parser
from tempest import config from tempest import config
from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import data_utils
from tempest.lib import decorators from tempest.lib import decorators
from tempest.lib import exceptions
from octavia_tempest_plugin.common import constants as const from octavia_tempest_plugin.common import constants as const
from octavia_tempest_plugin.tests import test_base from octavia_tempest_plugin.tests import test_base
@ -28,12 +30,6 @@ CONF = config.CONF
class HealthMonitorScenarioTest(test_base.LoadBalancerBaseTest): class HealthMonitorScenarioTest(test_base.LoadBalancerBaseTest):
@classmethod
def skip_checks(cls):
super(HealthMonitorScenarioTest, cls).skip_checks()
if not CONF.loadbalancer_feature_enabled.health_monitor_enabled:
raise cls.skipException('Health Monitors not supported')
@classmethod @classmethod
def resource_setup(cls): def resource_setup(cls):
"""Setup resources needed by the tests.""" """Setup resources needed by the tests."""
@ -57,28 +53,199 @@ class HealthMonitorScenarioTest(test_base.LoadBalancerBaseTest):
CONF.load_balancer.lb_build_interval, CONF.load_balancer.lb_build_interval,
CONF.load_balancer.lb_build_timeout) CONF.load_balancer.lb_build_timeout)
pool_name = data_utils.rand_name("lb_member_pool1_hm") @decorators.idempotent_id('4c2058f9-b8e2-4a5b-a2f3-3bd58a29f63b')
pool_kwargs = { def test_LC_HTTP_healthmonitor_CRUD(self):
const.NAME: pool_name, self._test_healthmonitor_CRUD(
const.PROTOCOL: const.HTTP, const.HTTP, const.LB_ALGORITHM_LEAST_CONNECTIONS,
const.LB_ALGORITHM: cls.lb_algorithm, const.HEALTH_MONITOR_HTTP)
const.LOADBALANCER_ID: cls.lb_id,
}
pool = cls.mem_pool_client.create_pool(**pool_kwargs)
cls.pool_id = pool[const.ID]
cls.addClassResourceCleanup(
cls.mem_pool_client.cleanup_pool,
cls.pool_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, @decorators.idempotent_id('08681eac-e907-4f71-8799-4b8fdf23914a')
cls.lb_id, const.PROVISIONING_STATUS, def test_LC_HTTPS_healthmonitor_CRUD(self):
const.ACTIVE, self._test_healthmonitor_CRUD(
CONF.load_balancer.build_interval, const.HTTPS, const.LB_ALGORITHM_LEAST_CONNECTIONS,
CONF.load_balancer.build_timeout) const.HEALTH_MONITOR_HTTPS)
@decorators.idempotent_id('74611ffb-45f8-4cf5-a28c-7cc37879a27b')
def test_LC_PING_healthmonitor_CRUD(self):
    """Test PING health monitor CRUD on a LEAST_CONNECTIONS TCP pool."""
    self._test_healthmonitor_CRUD(
        const.TCP, const.LB_ALGORITHM_LEAST_CONNECTIONS,
        const.HEALTH_MONITOR_PING)

@decorators.idempotent_id('cacec696-10f4-430d-bc9e-2c5f235a3324')
def test_LC_TCP_healthmonitor_CRUD(self):
    """Test TCP health monitor CRUD on a LEAST_CONNECTIONS TCP pool."""
    self._test_healthmonitor_CRUD(
        const.TCP, const.LB_ALGORITHM_LEAST_CONNECTIONS,
        const.HEALTH_MONITOR_TCP)

@decorators.idempotent_id('6becafb2-1e15-4977-bb29-b08f5728d028')
def test_LC_TLS_healthmonitor_CRUD(self):
    """Test TLS-HELLO health monitor CRUD on a LEAST_CONNECTIONS TCP pool."""
    self._test_healthmonitor_CRUD(
        const.TCP, const.LB_ALGORITHM_LEAST_CONNECTIONS,
        const.HEALTH_MONITOR_TLS_HELLO)

@decorators.idempotent_id('fe43ee90-093d-4175-837e-92f803958ef1')
def test_LC_UDP_healthmonitor_CRUD(self):
    """Test UDP-CONNECT health monitor CRUD on a LEAST_CONNECTIONS UDP pool."""
    self._test_healthmonitor_CRUD(
        const.UDP, const.LB_ALGORITHM_LEAST_CONNECTIONS,
        const.HEALTH_MONITOR_UDP_CONNECT)
@decorators.idempotent_id('a51e09aa-6e44-4c67-a9e4-df70d0e08f96') @decorators.idempotent_id('a51e09aa-6e44-4c67-a9e4-df70d0e08f96')
def test_healthmonitor_CRUD(self): def test_RR_HTTP_healthmonitor_CRUD(self):
self._test_healthmonitor_CRUD(
const.HTTP, const.LB_ALGORITHM_ROUND_ROBIN,
const.HEALTH_MONITOR_HTTP)
@decorators.idempotent_id('fef9eabc-9d1e-4ad2-ae3e-05afc8c84c48')
def test_RR_HTTPS_healthmonitor_CRUD(self):
    """Test HTTPS health monitor CRUD on a ROUND_ROBIN HTTPS pool."""
    self._test_healthmonitor_CRUD(
        const.HTTPS, const.LB_ALGORITHM_ROUND_ROBIN,
        const.HEALTH_MONITOR_HTTPS)

@decorators.idempotent_id('de01b73d-dba0-4426-9e20-9be3a34cfc44')
def test_RR_PING_healthmonitor_CRUD(self):
    """Test PING health monitor CRUD on a ROUND_ROBIN TCP pool."""
    self._test_healthmonitor_CRUD(
        const.TCP, const.LB_ALGORITHM_ROUND_ROBIN,
        const.HEALTH_MONITOR_PING)

@decorators.idempotent_id('141a121a-8918-4f9c-a070-eaf8ec29008d')
def test_RR_TCP_healthmonitor_CRUD(self):
    """Test TCP health monitor CRUD on a ROUND_ROBIN TCP pool."""
    self._test_healthmonitor_CRUD(
        const.TCP, const.LB_ALGORITHM_ROUND_ROBIN,
        const.HEALTH_MONITOR_TCP)

@decorators.idempotent_id('de80d87a-5479-41c6-8c6b-518cc64ec62d')
def test_RR_TLS_healthmonitor_CRUD(self):
    """Test TLS-HELLO health monitor CRUD on a ROUND_ROBIN TCP pool."""
    self._test_healthmonitor_CRUD(
        const.TCP, const.LB_ALGORITHM_ROUND_ROBIN,
        const.HEALTH_MONITOR_TLS_HELLO)

@decorators.idempotent_id('265d7359-f0a5-4083-92a8-07cb1787fe36')
def test_RR_UDP_healthmonitor_CRUD(self):
    """Test UDP-CONNECT health monitor CRUD on a ROUND_ROBIN UDP pool."""
    self._test_healthmonitor_CRUD(
        const.UDP, const.LB_ALGORITHM_ROUND_ROBIN,
        const.HEALTH_MONITOR_UDP_CONNECT)
@decorators.idempotent_id('20a2905f-2b53-4395-9a7f-1ded67ef4408')
def test_SI_HTTP_healthmonitor_CRUD(self):
    """Test HTTP health monitor CRUD on a SOURCE_IP HTTP pool."""
    self._test_healthmonitor_CRUD(
        const.HTTP, const.LB_ALGORITHM_SOURCE_IP,
        const.HEALTH_MONITOR_HTTP)

@decorators.idempotent_id('8a8cc776-b68f-4761-9bf9-cae566cdc155')
def test_SI_HTTPS_healthmonitor_CRUD(self):
    """Test HTTPS health monitor CRUD on a SOURCE_IP HTTPS pool."""
    self._test_healthmonitor_CRUD(
        const.HTTPS, const.LB_ALGORITHM_SOURCE_IP,
        const.HEALTH_MONITOR_HTTPS)

@decorators.idempotent_id('296a445c-5cc8-47a7-ae26-8d548f9712c3')
def test_SI_PING_healthmonitor_CRUD(self):
    """Test PING health monitor CRUD on a SOURCE_IP TCP pool."""
    self._test_healthmonitor_CRUD(
        const.TCP, const.LB_ALGORITHM_SOURCE_IP,
        const.HEALTH_MONITOR_PING)

@decorators.idempotent_id('94be34b1-4dc6-492b-a777-0587626a785f')
def test_SI_TCP_healthmonitor_CRUD(self):
    """Test TCP health monitor CRUD on a SOURCE_IP TCP pool."""
    self._test_healthmonitor_CRUD(
        const.TCP, const.LB_ALGORITHM_SOURCE_IP,
        const.HEALTH_MONITOR_TCP)

@decorators.idempotent_id('0de0e021-fd3c-4f7c-b959-67d758394fd2')
def test_SI_TLS_healthmonitor_CRUD(self):
    """Test TLS-HELLO health monitor CRUD on a SOURCE_IP TCP pool."""
    self._test_healthmonitor_CRUD(
        const.TCP, const.LB_ALGORITHM_SOURCE_IP,
        const.HEALTH_MONITOR_TLS_HELLO)

@decorators.idempotent_id('3c79750a-aba6-4838-acbe-bc937ccf2118')
def test_SI_UDP_healthmonitor_CRUD(self):
    """Test UDP-CONNECT health monitor CRUD on a SOURCE_IP UDP pool."""
    self._test_healthmonitor_CRUD(
        const.UDP, const.LB_ALGORITHM_SOURCE_IP,
        const.HEALTH_MONITOR_UDP_CONNECT)
@decorators.idempotent_id('d5e0d1b6-7cce-4592-abce-0ac6bee18818')
def test_SIP_HTTP_healthmonitor_CRUD(self):
    """Test HTTP health monitor CRUD on a SOURCE_IP_PORT pool.

    Providers that do not implement SOURCE_IP_PORT raise NotImplemented;
    translate that into a test skip rather than a failure.
    """
    try:
        self._test_healthmonitor_CRUD(
            const.HTTP, const.LB_ALGORITHM_SOURCE_IP_PORT,
            const.HEALTH_MONITOR_HTTP)
    except exceptions.NotImplemented as e:
        driver = CONF.load_balancer.provider
        skip_msg = ("The configured provider driver '{driver}' "
                    "does not support a feature required for this "
                    "test.".format(driver=driver))
        # Prefer the driver's own faultstring when one was returned.
        if hasattr(e, 'resp_body'):
            skip_msg = e.resp_body.get('faultstring', skip_msg)
        raise testtools.TestCase.skipException(skip_msg)
@decorators.idempotent_id('e188daac-6db9-4dc2-8ecb-b47932e1984a')
def test_SIP_HTTPS_healthmonitor_CRUD(self):
    """Test HTTPS health monitor CRUD on a SOURCE_IP_PORT pool.

    Providers that do not implement SOURCE_IP_PORT raise NotImplemented;
    translate that into a test skip rather than a failure.
    """
    try:
        self._test_healthmonitor_CRUD(
            const.HTTPS, const.LB_ALGORITHM_SOURCE_IP_PORT,
            const.HEALTH_MONITOR_HTTPS)
    except exceptions.NotImplemented as e:
        driver = CONF.load_balancer.provider
        skip_msg = ("The configured provider driver '{driver}' "
                    "does not support a feature required for this "
                    "test.".format(driver=driver))
        # Prefer the driver's own faultstring when one was returned.
        if hasattr(e, 'resp_body'):
            skip_msg = e.resp_body.get('faultstring', skip_msg)
        raise testtools.TestCase.skipException(skip_msg)
@decorators.idempotent_id('f9458ffd-5af7-402b-9c15-c061bf2eb9ba')
def test_SIP_PING_healthmonitor_CRUD(self):
    """Test PING health monitor CRUD on a SOURCE_IP_PORT TCP pool.

    Providers that do not implement SOURCE_IP_PORT raise NotImplemented;
    translate that into a test skip rather than a failure.
    """
    try:
        self._test_healthmonitor_CRUD(
            const.TCP, const.LB_ALGORITHM_SOURCE_IP_PORT,
            const.HEALTH_MONITOR_PING)
    except exceptions.NotImplemented as e:
        driver = CONF.load_balancer.provider
        skip_msg = ("The configured provider driver '{driver}' "
                    "does not support a feature required for this "
                    "test.".format(driver=driver))
        # Prefer the driver's own faultstring when one was returned.
        if hasattr(e, 'resp_body'):
            skip_msg = e.resp_body.get('faultstring', skip_msg)
        raise testtools.TestCase.skipException(skip_msg)
@decorators.idempotent_id('b4cbe603-0a14-4778-b38c-f330053c86b6')
def test_SIP_TCP_healthmonitor_CRUD(self):
    """Test TCP health monitor CRUD on a SOURCE_IP_PORT TCP pool.

    Providers that do not implement SOURCE_IP_PORT raise NotImplemented;
    translate that into a test skip rather than a failure.
    """
    try:
        self._test_healthmonitor_CRUD(
            const.TCP, const.LB_ALGORITHM_SOURCE_IP_PORT,
            const.HEALTH_MONITOR_TCP)
    except exceptions.NotImplemented as e:
        driver = CONF.load_balancer.provider
        skip_msg = ("The configured provider driver '{driver}' "
                    "does not support a feature required for this "
                    "test.".format(driver=driver))
        # Prefer the driver's own faultstring when one was returned.
        if hasattr(e, 'resp_body'):
            skip_msg = e.resp_body.get('faultstring', skip_msg)
        raise testtools.TestCase.skipException(skip_msg)
@decorators.idempotent_id('57714d4c-d584-4345-9ceb-becc3ae37b7f')
def test_SIP_TLS_healthmonitor_CRUD(self):
    """Test TLS-HELLO health monitor CRUD on a SOURCE_IP_PORT TCP pool.

    Providers that do not implement SOURCE_IP_PORT raise NotImplemented;
    translate that into a test skip rather than a failure.
    """
    try:
        self._test_healthmonitor_CRUD(
            const.TCP, const.LB_ALGORITHM_SOURCE_IP_PORT,
            const.HEALTH_MONITOR_TLS_HELLO)
    except exceptions.NotImplemented as e:
        driver = CONF.load_balancer.provider
        skip_msg = ("The configured provider driver '{driver}' "
                    "does not support a feature required for this "
                    "test.".format(driver=driver))
        # Prefer the driver's own faultstring when one was returned.
        if hasattr(e, 'resp_body'):
            skip_msg = e.resp_body.get('faultstring', skip_msg)
        raise testtools.TestCase.skipException(skip_msg)
@decorators.idempotent_id('cc4abf84-361b-409b-b859-9a860d539deb')
def test_SIP_UDP_healthmonitor_CRUD(self):
    """Test UDP-CONNECT health monitor CRUD on a SOURCE_IP_PORT UDP pool.

    Providers that do not implement SOURCE_IP_PORT raise NotImplemented;
    translate that into a test skip rather than a failure.
    """
    try:
        self._test_healthmonitor_CRUD(
            const.UDP, const.LB_ALGORITHM_SOURCE_IP_PORT,
            const.HEALTH_MONITOR_UDP_CONNECT)
    except exceptions.NotImplemented as e:
        driver = CONF.load_balancer.provider
        skip_msg = ("The configured provider driver '{driver}' "
                    "does not support a feature required for this "
                    "test.".format(driver=driver))
        # Prefer the driver's own faultstring when one was returned.
        if hasattr(e, 'resp_body'):
            skip_msg = e.resp_body.get('faultstring', skip_msg)
        raise testtools.TestCase.skipException(skip_msg)
def _test_healthmonitor_CRUD(self, pool_protocol, pool_algorithm, hm_type):
"""Tests healthmonitor create, read, update, delete, and member status """Tests healthmonitor create, read, update, delete, and member status
* Create a fully populated healthmonitor. * Create a fully populated healthmonitor.
@ -86,21 +253,48 @@ class HealthMonitorScenarioTest(test_base.LoadBalancerBaseTest):
* Update the healthmonitor. * Update the healthmonitor.
* Delete the healthmonitor. * Delete the healthmonitor.
""" """
if (pool_algorithm == const.LB_ALGORITHM_SOURCE_IP_PORT and not
self.mem_listener_client.is_version_supported(
self.api_version, '2.13')):
raise testtools.TestCase.skipException(
'Skipping this test as load balancing algorithm '
'SOURCE_IP_PORT requires API version 2.13 or newer.')
pool_name = data_utils.rand_name("lb_member_pool1_hm")
pool_kwargs = {
const.NAME: pool_name,
const.PROTOCOL: pool_protocol,
const.LB_ALGORITHM: pool_algorithm,
const.LOADBALANCER_ID: self.lb_id,
}
pool = self.mem_pool_client.create_pool(**pool_kwargs)
self.addClassResourceCleanup(
self.mem_pool_client.cleanup_pool, pool[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
# Healthmonitor create # Healthmonitor create
hm_name = data_utils.rand_name("lb_member_hm1-CRUD") hm_name = data_utils.rand_name("lb_member_hm1-CRUD")
delay = 3 if hm_type == const.HEALTH_MONITOR_UDP_CONNECT else 2
hm_kwargs = { hm_kwargs = {
const.POOL_ID: self.pool_id, const.POOL_ID: pool[const.ID],
const.NAME: hm_name, const.NAME: hm_name,
const.TYPE: const.HEALTH_MONITOR_HTTP, const.TYPE: hm_type,
const.DELAY: 2, const.DELAY: delay,
const.TIMEOUT: 2, const.TIMEOUT: 2,
const.MAX_RETRIES: 2, const.MAX_RETRIES: 2,
const.MAX_RETRIES_DOWN: 2, const.MAX_RETRIES_DOWN: 2,
const.HTTP_METHOD: const.GET,
const.URL_PATH: '/',
const.EXPECTED_CODES: '200',
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
} }
if hm_type == const.HEALTH_MONITOR_HTTP:
hm_kwargs.update({const.HTTP_METHOD: const.GET,
const.URL_PATH: '/',
const.EXPECTED_CODES: '200'})
hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs) hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
self.addCleanup( self.addCleanup(
@ -126,8 +320,10 @@ class HealthMonitorScenarioTest(test_base.LoadBalancerBaseTest):
equal_items = [const.NAME, const.TYPE, const.DELAY, const.TIMEOUT, equal_items = [const.NAME, const.TYPE, const.DELAY, const.TIMEOUT,
const.MAX_RETRIES, const.MAX_RETRIES_DOWN, const.MAX_RETRIES, const.MAX_RETRIES_DOWN,
const.HTTP_METHOD, const.URL_PATH, const.EXPECTED_CODES,
const.ADMIN_STATE_UP] const.ADMIN_STATE_UP]
if hm_type == const.HEALTH_MONITOR_HTTP:
equal_items = equal_items + [const.HTTP_METHOD, const.URL_PATH,
const.EXPECTED_CODES]
for item in equal_items: for item in equal_items:
self.assertEqual(hm_kwargs[item], hm[item]) self.assertEqual(hm_kwargs[item], hm[item])
@ -140,11 +336,13 @@ class HealthMonitorScenarioTest(test_base.LoadBalancerBaseTest):
const.TIMEOUT: hm_kwargs[const.TIMEOUT] + 1, const.TIMEOUT: hm_kwargs[const.TIMEOUT] + 1,
const.MAX_RETRIES: hm_kwargs[const.MAX_RETRIES] + 1, const.MAX_RETRIES: hm_kwargs[const.MAX_RETRIES] + 1,
const.MAX_RETRIES_DOWN: hm_kwargs[const.MAX_RETRIES_DOWN] + 1, const.MAX_RETRIES_DOWN: hm_kwargs[const.MAX_RETRIES_DOWN] + 1,
const.HTTP_METHOD: const.POST,
const.URL_PATH: '/test',
const.EXPECTED_CODES: '201,202',
const.ADMIN_STATE_UP: not hm_kwargs[const.ADMIN_STATE_UP], const.ADMIN_STATE_UP: not hm_kwargs[const.ADMIN_STATE_UP],
} }
if hm_type == const.HEALTH_MONITOR_HTTP:
hm_update_kwargs.update({const.HTTP_METHOD: const.POST,
const.URL_PATH: '/test',
const.EXPECTED_CODES: '201,202'})
hm = self.mem_healthmonitor_client.update_healthmonitor( hm = self.mem_healthmonitor_client.update_healthmonitor(
hm[const.ID], **hm_update_kwargs) hm[const.ID], **hm_update_kwargs)
@ -163,8 +361,10 @@ class HealthMonitorScenarioTest(test_base.LoadBalancerBaseTest):
# Test changed items # Test changed items
equal_items = [const.NAME, const.DELAY, const.TIMEOUT, equal_items = [const.NAME, const.DELAY, const.TIMEOUT,
const.MAX_RETRIES, const.MAX_RETRIES_DOWN, const.MAX_RETRIES, const.MAX_RETRIES_DOWN,
const.HTTP_METHOD, const.URL_PATH, const.EXPECTED_CODES,
const.ADMIN_STATE_UP] const.ADMIN_STATE_UP]
if hm_type == const.HEALTH_MONITOR_HTTP:
equal_items = equal_items + [const.HTTP_METHOD, const.URL_PATH,
const.EXPECTED_CODES]
for item in equal_items: for item in equal_items:
self.assertEqual(hm_update_kwargs[item], hm[item]) self.assertEqual(hm_update_kwargs[item], hm[item])

View File

@ -11,12 +11,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import testtools
import requests
from tempest import config from tempest import config
from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import data_utils
from tempest.lib import decorators from tempest.lib import decorators
from tempest.lib import exceptions as tempest_exceptions
from octavia_tempest_plugin.common import constants as const from octavia_tempest_plugin.common import constants as const
from octavia_tempest_plugin.tests import test_base from octavia_tempest_plugin.tests import test_base
@ -69,19 +69,17 @@ class IPv6TrafficOperationsScenarioTest(
cls.listener_ids = {} cls.listener_ids = {}
cls.pool_ids = {} cls.pool_ids = {}
cls.protocol = const.HTTP
lb_feature_enabled = CONF.loadbalancer_feature_enabled
if not lb_feature_enabled.l7_protocol_enabled:
cls.protocol = lb_feature_enabled.l4_protocol
# Don't use same ports for HTTP/l4_protocol and UDP because some # Don't use same ports for HTTP/l4_protocol and UDP because some
# releases (<=train) don't support it # releases (<=train) don't support it
cls._listener_pool_create(const.HTTP, 80) cls._listener_pool_create(const.HTTP, 80)
cls._listener_pool_create(const.TCP, 81)
cls._listener_pool_create(const.UDP, 8080) cls._listener_pool_create(const.UDP, 8080)
@classmethod @classmethod
def _listener_pool_create(cls, protocol, protocol_port): def _listener_pool_create(cls, protocol, protocol_port,
algorithm=const.LB_ALGORITHM_ROUND_ROBIN):
if (protocol == const.UDP and if (protocol == const.UDP and
not cls.mem_listener_client.is_version_supported( not cls.mem_listener_client.is_version_supported(
cls.api_version, '2.1')): cls.api_version, '2.1')):
@ -103,8 +101,7 @@ class IPv6TrafficOperationsScenarioTest(
# haproxy process and use haproxy>=1.8: # haproxy process and use haproxy>=1.8:
const.CONNECTION_LIMIT: 200, const.CONNECTION_LIMIT: 200,
} }
listener = cls.mem_listener_client.create_listener( listener = cls.mem_listener_client.create_listener(**listener_kwargs)
**listener_kwargs)
cls.listener_ids[protocol] = listener[const.ID] cls.listener_ids[protocol] = listener[const.ID]
cls.addClassResourceCleanup( cls.addClassResourceCleanup(
cls.mem_listener_client.cleanup_listener, cls.mem_listener_client.cleanup_listener,
@ -121,7 +118,7 @@ class IPv6TrafficOperationsScenarioTest(
pool_kwargs = { pool_kwargs = {
const.NAME: pool_name, const.NAME: pool_name,
const.PROTOCOL: protocol, const.PROTOCOL: protocol,
const.LB_ALGORITHM: cls.lb_algorithm, const.LB_ALGORITHM: algorithm,
const.LISTENER_ID: cls.listener_ids[protocol], const.LISTENER_ID: cls.listener_ids[protocol],
} }
pool = cls.mem_pool_client.create_pool(**pool_kwargs) pool = cls.mem_pool_client.create_pool(**pool_kwargs)
@ -137,8 +134,8 @@ class IPv6TrafficOperationsScenarioTest(
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout) CONF.load_balancer.build_timeout)
def _test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(self, protocol, def _test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(
protocol_port): self, protocol, protocol_port, persistent=True):
"""Tests traffic through a loadbalancer with IPv4 and IPv6 members. """Tests traffic through a loadbalancer with IPv4 and IPv6 members.
* Set up members on a loadbalancer. * Set up members on a loadbalancer.
@ -156,8 +153,7 @@ class IPv6TrafficOperationsScenarioTest(
if self.lb_member_1_subnet: if self.lb_member_1_subnet:
member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID] member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
member1 = self.mem_member_client.create_member( member1 = self.mem_member_client.create_member(**member1_kwargs)
**member1_kwargs)
self.addCleanup( self.addCleanup(
self.mem_member_client.cleanup_member, self.mem_member_client.cleanup_member,
member1[const.ID], pool_id=self.pool_ids[protocol], member1[const.ID], pool_id=self.pool_ids[protocol],
@ -181,8 +177,7 @@ class IPv6TrafficOperationsScenarioTest(
member2_kwargs[const.SUBNET_ID] = ( member2_kwargs[const.SUBNET_ID] = (
self.lb_member_2_ipv6_subnet[const.ID]) self.lb_member_2_ipv6_subnet[const.ID])
member2 = self.mem_member_client.create_member( member2 = self.mem_member_client.create_member(**member2_kwargs)
**member2_kwargs)
self.addCleanup( self.addCleanup(
self.mem_member_client.cleanup_member, self.mem_member_client.cleanup_member,
member2[const.ID], pool_id=self.pool_ids[protocol], member2[const.ID], pool_id=self.pool_ids[protocol],
@ -196,11 +191,16 @@ class IPv6TrafficOperationsScenarioTest(
# Send some traffic # Send some traffic
self.check_members_balanced(self.lb_vip_address, self.check_members_balanced(self.lb_vip_address,
protocol_port=protocol_port, protocol_port=protocol_port,
protocol=protocol) protocol=protocol, persistent=persistent)
@decorators.idempotent_id('219ac17d-c5c1-4e7e-a9d5-0764d7ce7746') @decorators.idempotent_id('219ac17d-c5c1-4e7e-a9d5-0764d7ce7746')
def test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(self): def test_http_ipv6_vip_mixed_ipv4_ipv6_members_traffic(self):
self._test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(self.protocol, 80) self._test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(const.HTTP, 80)
@decorators.idempotent_id('a4e8d5d1-03d5-4252-9300-e89b9b2bdafc')
def test_tcp_ipv6_vip_mixed_ipv4_ipv6_members_traffic(self):
    """TCP traffic through an IPv6 VIP with mixed IPv4/IPv6 members.

    Uses the TCP listener on port 81. persistent=False — presumably
    avoids reusing one connection so requests are re-balanced; confirm
    against check_members_balanced.
    """
    self._test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(const.TCP, 81,
                                                        persistent=False)
@decorators.idempotent_id('c468434d-bc84-4bfa-825f-d4761daa0d76') @decorators.idempotent_id('c468434d-bc84-4bfa-825f-d4761daa0d76')
# Skipping test for amphora driver until "UDP load balancers cannot mix # Skipping test for amphora driver until "UDP load balancers cannot mix
@ -218,7 +218,8 @@ class IPv6TrafficOperationsScenarioTest(
self._test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(const.UDP, 8080) self._test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(const.UDP, 8080)
def _test_ipv6_vip_ipv6_members_traffic(self, protocol, protocol_port): def _test_ipv6_vip_ipv6_members_traffic(self, protocol, protocol_port,
persistent=True):
"""Tests traffic through a loadbalancer with IPv6 members. """Tests traffic through a loadbalancer with IPv6 members.
* Set up members on a loadbalancer. * Set up members on a loadbalancer.
@ -238,8 +239,7 @@ class IPv6TrafficOperationsScenarioTest(
member1_kwargs[const.SUBNET_ID] = ( member1_kwargs[const.SUBNET_ID] = (
self.lb_member_1_ipv6_subnet[const.ID]) self.lb_member_1_ipv6_subnet[const.ID])
member1 = self.mem_member_client.create_member( member1 = self.mem_member_client.create_member(**member1_kwargs)
**member1_kwargs)
self.addCleanup( self.addCleanup(
self.mem_member_client.cleanup_member, self.mem_member_client.cleanup_member,
member1[const.ID], pool_id=self.pool_ids[protocol], member1[const.ID], pool_id=self.pool_ids[protocol],
@ -263,8 +263,7 @@ class IPv6TrafficOperationsScenarioTest(
member2_kwargs[const.SUBNET_ID] = ( member2_kwargs[const.SUBNET_ID] = (
self.lb_member_2_ipv6_subnet[const.ID]) self.lb_member_2_ipv6_subnet[const.ID])
member2 = self.mem_member_client.create_member( member2 = self.mem_member_client.create_member(**member2_kwargs)
**member2_kwargs)
self.addCleanup( self.addCleanup(
self.mem_member_client.cleanup_member, self.mem_member_client.cleanup_member,
member2[const.ID], pool_id=self.pool_ids[protocol], member2[const.ID], pool_id=self.pool_ids[protocol],
@ -278,11 +277,16 @@ class IPv6TrafficOperationsScenarioTest(
# Send some traffic # Send some traffic
self.check_members_balanced(self.lb_vip_address, self.check_members_balanced(self.lb_vip_address,
protocol_port=protocol_port, protocol_port=protocol_port,
protocol=protocol) protocol=protocol, persistent=persistent)
@decorators.idempotent_id('dd75f41a-5b29-47ad-963d-3434f1056ca3') @decorators.idempotent_id('dd75f41a-5b29-47ad-963d-3434f1056ca3')
def test_ipv6_vip_ipv6_members_traffic(self): def test_http_ipv6_vip_ipv6_members_traffic(self):
self._test_ipv6_vip_ipv6_members_traffic(self.protocol, 80) self._test_ipv6_vip_ipv6_members_traffic(const.HTTP, 80)
@decorators.idempotent_id('9bb93619-14cb-45d9-ad60-2f80c201486a')
def test_tcp_ipv6_vip_ipv6_members_traffic(self):
    """TCP traffic through an IPv6 VIP with IPv6-only members.

    Uses the TCP listener on port 81. persistent=False — presumably
    avoids reusing one connection so requests are re-balanced; confirm
    against check_members_balanced.
    """
    self._test_ipv6_vip_ipv6_members_traffic(const.TCP, 81,
                                             persistent=False)
@decorators.idempotent_id('26317013-a9b5-4a00-a993-d4c55b764e40') @decorators.idempotent_id('26317013-a9b5-4a00-a993-d4c55b764e40')
def test_ipv6_vip_ipv6_members_udp_traffic(self): def test_ipv6_vip_ipv6_members_udp_traffic(self):
@ -293,8 +297,68 @@ class IPv6TrafficOperationsScenarioTest(
self._test_ipv6_vip_ipv6_members_traffic(const.UDP, 8080) self._test_ipv6_vip_ipv6_members_traffic(const.UDP, 8080)
@decorators.idempotent_id('9bead31b-0760-4c8f-b70a-f758fc5edd6a')
def test_ipv6_http_LC_listener_with_allowed_cidrs(self):
    """HTTP listener with allowed CIDRs, LEAST_CONNECTIONS, port 90."""
    self._test_listener_with_allowed_cidrs(
        const.HTTP, 90, const.LB_ALGORITHM_LEAST_CONNECTIONS)

@decorators.idempotent_id('843a13f7-e00f-4151-8817-b5395eb69b52')
def test_ipv6_tcp_LC_listener_with_allowed_cidrs(self):
    """TCP listener with allowed CIDRs, LEAST_CONNECTIONS, port 91."""
    self._test_listener_with_allowed_cidrs(
        const.TCP, 91, const.LB_ALGORITHM_LEAST_CONNECTIONS)

@decorators.idempotent_id('cc0d55b1-87e8-4a87-bf50-66299947a469')
def test_ipv6_udp_LC_listener_with_allowed_cidrs(self):
    """UDP listener with allowed CIDRs, LEAST_CONNECTIONS, port 92."""
    self._test_listener_with_allowed_cidrs(
        const.UDP, 92, const.LB_ALGORITHM_LEAST_CONNECTIONS)
@decorators.idempotent_id('84b23f68-4bc3-49e5-8372-60c25fe69613') @decorators.idempotent_id('84b23f68-4bc3-49e5-8372-60c25fe69613')
def test_listener_with_allowed_cidrs(self): def test_ipv6_http_RR_listener_with_allowed_cidrs(self):
self._test_listener_with_allowed_cidrs(
const.HTTP, 93, const.LB_ALGORITHM_ROUND_ROBIN)
@decorators.idempotent_id('52c07510-5755-44a3-9231-64c9cbb4bbd4')
def test_ipv6_tcp_RR_listener_with_allowed_cidrs(self):
    """TCP listener with allowed CIDRs, ROUND_ROBIN, port 94."""
    self._test_listener_with_allowed_cidrs(
        const.TCP, 94, const.LB_ALGORITHM_ROUND_ROBIN)

@decorators.idempotent_id('df0417d9-dc72-4bb5-b3ce-1e2558a3c4a9')
def test_ipv6_udp_RR_listener_with_allowed_cidrs(self):
    """UDP listener with allowed CIDRs, ROUND_ROBIN, port 95."""
    self._test_listener_with_allowed_cidrs(
        const.UDP, 95, const.LB_ALGORITHM_ROUND_ROBIN)

@decorators.idempotent_id('d1256195-3d85-4ffd-bda3-1c0ab78b8ce1')
def test_ipv6_http_SI_listener_with_allowed_cidrs(self):
    """HTTP listener with allowed CIDRs, SOURCE_IP, port 96."""
    self._test_listener_with_allowed_cidrs(
        const.HTTP, 96, const.LB_ALGORITHM_SOURCE_IP)

@decorators.idempotent_id('bf8504b6-b95a-4f8a-9032-ab432db46eec')
def test_ipv6_tcp_SI_listener_with_allowed_cidrs(self):
    """TCP listener with allowed CIDRs, SOURCE_IP, port 97."""
    self._test_listener_with_allowed_cidrs(
        const.TCP, 97, const.LB_ALGORITHM_SOURCE_IP)

@decorators.idempotent_id('ce75bf28-5288-4821-a603-460e602de8b9')
def test_ipv6_udp_SI_listener_with_allowed_cidrs(self):
    """UDP listener with allowed CIDRs, SOURCE_IP, port 98."""
    self._test_listener_with_allowed_cidrs(
        const.UDP, 98, const.LB_ALGORITHM_SOURCE_IP)

@decorators.idempotent_id('c11768f1-19b4-48cc-99a5-0737379b1957')
def test_ipv6_http_SIP_listener_with_allowed_cidrs(self):
    """HTTP listener with allowed CIDRs, SOURCE_IP_PORT, port 99."""
    self._test_listener_with_allowed_cidrs(
        const.HTTP, 99, const.LB_ALGORITHM_SOURCE_IP_PORT)

@decorators.idempotent_id('fcfe2ab1-2c36-4793-a926-1fec589a9a2a')
def test_ipv6_tcp_SIP_listener_with_allowed_cidrs(self):
    """TCP listener with allowed CIDRs, SOURCE_IP_PORT, port 100."""
    self._test_listener_with_allowed_cidrs(
        const.TCP, 100, const.LB_ALGORITHM_SOURCE_IP_PORT)

@decorators.idempotent_id('80f31bc1-819e-4d9e-8820-bf3e28600540')
def test_ipv6_udp_SIP_listener_with_allowed_cidrs(self):
    """UDP listener with allowed CIDRs, SOURCE_IP_PORT, port 101."""
    self._test_listener_with_allowed_cidrs(
        const.UDP, 101, const.LB_ALGORITHM_SOURCE_IP_PORT)
def _test_listener_with_allowed_cidrs(self, protocol, protocol_port,
algorithm):
"""Tests traffic through a loadbalancer with allowed CIDRs set. """Tests traffic through a loadbalancer with allowed CIDRs set.
* Set up listener with allowed CIDRS (allow all) on a loadbalancer. * Set up listener with allowed CIDRS (allow all) on a loadbalancer.
@ -312,11 +376,10 @@ class IPv6TrafficOperationsScenarioTest(
'or newer.') 'or newer.')
listener_name = data_utils.rand_name("lb_member_listener2_cidrs") listener_name = data_utils.rand_name("lb_member_listener2_cidrs")
listener_port = 8080
listener_kwargs = { listener_kwargs = {
const.NAME: listener_name, const.NAME: listener_name,
const.PROTOCOL: self.protocol, const.PROTOCOL: protocol,
const.PROTOCOL_PORT: listener_port, const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id, const.LOADBALANCER_ID: self.lb_id,
const.ALLOWED_CIDRS: ['::/0'] const.ALLOWED_CIDRS: ['::/0']
} }
@ -336,11 +399,25 @@ class IPv6TrafficOperationsScenarioTest(
pool_name = data_utils.rand_name("lb_member_pool3_cidrs") pool_name = data_utils.rand_name("lb_member_pool3_cidrs")
pool_kwargs = { pool_kwargs = {
const.NAME: pool_name, const.NAME: pool_name,
const.PROTOCOL: self.protocol, const.PROTOCOL: protocol,
const.LB_ALGORITHM: self.lb_algorithm, const.LB_ALGORITHM: algorithm,
const.LISTENER_ID: listener_id, const.LISTENER_ID: listener_id,
} }
pool = self.mem_pool_client.create_pool(**pool_kwargs) # This is a special case as the reference driver does not support
# SOURCE-IP-PORT. Since it runs with not_implemented_is_error, we must
# handle this test case special.
try:
pool = self.mem_pool_client.create_pool(**pool_kwargs)
except tempest_exceptions.NotImplemented as e:
if algorithm != const.LB_ALGORITHM_SOURCE_IP_PORT:
raise
message = ("The configured provider driver '{driver}' "
"does not support a feature required for this "
"test.".format(driver=CONF.load_balancer.provider))
if hasattr(e, 'resp_body'):
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
pool_id = pool[const.ID] pool_id = pool[const.ID]
self.addCleanup( self.addCleanup(
self.mem_pool_client.cleanup_pool, self.mem_pool_client.cleanup_pool,
@ -359,7 +436,7 @@ class IPv6TrafficOperationsScenarioTest(
const.POOL_ID: pool_id, const.POOL_ID: pool_id,
const.NAME: member1_name, const.NAME: member1_name,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
const.ADDRESS: self.webserver1_ip, const.ADDRESS: self.webserver1_ipv6,
const.PROTOCOL_PORT: 80, const.PROTOCOL_PORT: 80,
} }
if self.lb_member_1_subnet: if self.lb_member_1_subnet:
@ -383,7 +460,7 @@ class IPv6TrafficOperationsScenarioTest(
const.POOL_ID: pool_id, const.POOL_ID: pool_id,
const.NAME: member2_name, const.NAME: member2_name,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
const.ADDRESS: self.webserver2_ip, const.ADDRESS: self.webserver2_ipv6,
const.PROTOCOL_PORT: 80, const.PROTOCOL_PORT: 80,
} }
if self.lb_member_2_subnet: if self.lb_member_2_subnet:
@ -401,8 +478,13 @@ class IPv6TrafficOperationsScenarioTest(
CONF.load_balancer.check_timeout) CONF.load_balancer.check_timeout)
# Send some traffic # Send some traffic
members = 2
if algorithm == const.LB_ALGORITHM_SOURCE_IP:
members = 1
self.check_members_balanced( self.check_members_balanced(
self.lb_vip_address, protocol_port=listener_port) self.lb_vip_address, protocol=protocol,
protocol_port=protocol_port, persistent=False,
traffic_member_count=members)
listener_kwargs = { listener_kwargs = {
const.LISTENER_ID: listener_id, const.LISTENER_ID: listener_id,
@ -415,21 +497,27 @@ class IPv6TrafficOperationsScenarioTest(
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout) CONF.load_balancer.build_timeout)
url_for_vip = 'http://[{}]:{}/'.format(self.lb_vip_address,
listener_port)
# NOTE: Before we start with the consistent response check, we must # NOTE: Before we start with the consistent response check, we must
# wait until Neutron completes the SG update. # wait until Neutron completes the SG update.
# See https://bugs.launchpad.net/neutron/+bug/1866353. # See https://bugs.launchpad.net/neutron/+bug/1866353.
def expect_conn_error(url): def expect_timeout_error(address, protocol, protocol_port):
try: try:
requests.Session().get(url) self.make_request(address, protocol=protocol,
except requests.exceptions.ConnectionError: protocol_port=protocol_port)
except tempest_exceptions.TimeoutException:
return True return True
return False return False
waiters.wait_until_true(expect_conn_error, url=url_for_vip) waiters.wait_until_true(
expect_timeout_error, address=self.lb_vip_address,
protocol=protocol, protocol_port=protocol_port)
# Assert that the server is consistently unavailable # Assert that the server is consistently unavailable
if protocol == const.UDP:
url_for_vip = 'udp://[{}]:{}/'.format(self.lb_vip_address,
protocol_port)
else:
url_for_vip = 'http://[{}]:{}/'.format(self.lb_vip_address,
protocol_port)
self.assertConsistentResponse( self.assertConsistentResponse(
(None, None), url_for_vip, repeat=3, conn_error=True) (None, None), url_for_vip, repeat=3, expect_connection_error=True)

View File

@ -28,15 +28,6 @@ CONF = config.CONF
class L7PolicyScenarioTest(test_base.LoadBalancerBaseTest): class L7PolicyScenarioTest(test_base.LoadBalancerBaseTest):
@classmethod
def skip_checks(cls):
super(L7PolicyScenarioTest, cls).skip_checks()
if not CONF.loadbalancer_feature_enabled.l7_protocol_enabled:
raise cls.skipException(
'[loadbalancer-feature-enabled] '
'"l7_protocol_enabled" is set to False in the Tempest '
'configuration. L7 Scenario tests will be skipped.')
@classmethod @classmethod
def resource_setup(cls): def resource_setup(cls):
"""Setup resources needed by the tests.""" """Setup resources needed by the tests."""
@ -84,7 +75,7 @@ class L7PolicyScenarioTest(test_base.LoadBalancerBaseTest):
pool_kwargs = { pool_kwargs = {
const.NAME: pool_name, const.NAME: pool_name,
const.PROTOCOL: const.HTTP, const.PROTOCOL: const.HTTP,
const.LB_ALGORITHM: cls.lb_algorithm, const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
const.LOADBALANCER_ID: cls.lb_id, const.LOADBALANCER_ID: cls.lb_id,
} }
pool = cls.mem_pool_client.create_pool(**pool_kwargs) pool = cls.mem_pool_client.create_pool(**pool_kwargs)

View File

@ -28,15 +28,6 @@ CONF = config.CONF
class L7RuleScenarioTest(test_base.LoadBalancerBaseTest): class L7RuleScenarioTest(test_base.LoadBalancerBaseTest):
@classmethod
def skip_checks(cls):
super(L7RuleScenarioTest, cls).skip_checks()
if not CONF.loadbalancer_feature_enabled.l7_protocol_enabled:
raise cls.skipException(
'[loadbalancer-feature-enabled] '
'"l7_protocol_enabled" is set to False in the Tempest '
'configuration. L7 Scenario tests will be skipped.')
@classmethod @classmethod
def resource_setup(cls): def resource_setup(cls):
"""Setup resources needed by the tests.""" """Setup resources needed by the tests."""

View File

@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import testtools
from uuid import UUID from uuid import UUID
from dateutil import parser from dateutil import parser
@ -19,6 +20,7 @@ from oslo_utils import strutils
from tempest import config from tempest import config
from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import data_utils
from tempest.lib import decorators from tempest.lib import decorators
from tempest.lib import exceptions
from octavia_tempest_plugin.common import constants as const from octavia_tempest_plugin.common import constants as const
from octavia_tempest_plugin.tests import test_base from octavia_tempest_plugin.tests import test_base
@ -51,23 +53,30 @@ class ListenerScenarioTest(test_base.LoadBalancerBaseTest):
const.ACTIVE, const.ACTIVE,
CONF.load_balancer.lb_build_interval, CONF.load_balancer.lb_build_interval,
CONF.load_balancer.lb_build_timeout) CONF.load_balancer.lb_build_timeout)
cls.protocol = const.HTTP
lb_feature_enabled = CONF.loadbalancer_feature_enabled cls.allowed_cidrs = ['192.0.1.0/24']
if not lb_feature_enabled.l7_protocol_enabled: if CONF.load_balancer.test_with_ipv6:
cls.protocol = lb_feature_enabled.l4_protocol cls.allowed_cidrs = ['2001:db8:a0b:12f0::/64']
def _create_pools(cls, protocol, algorithm):
if (algorithm == const.LB_ALGORITHM_SOURCE_IP_PORT and not
cls.mem_listener_client.is_version_supported(
cls.api_version, '2.13')):
raise testtools.TestCase.skipException(
'Skipping this test as load balancing algorithm '
'SOURCE_IP_PORT requires API version 2.13 or newer.')
pool1_name = data_utils.rand_name("lb_member_pool1_listener") pool1_name = data_utils.rand_name("lb_member_pool1_listener")
pool1_kwargs = { pool1_kwargs = {
const.NAME: pool1_name, const.NAME: pool1_name,
const.PROTOCOL: cls.protocol, const.PROTOCOL: protocol,
const.LB_ALGORITHM: cls.lb_algorithm, const.LB_ALGORITHM: algorithm,
const.LOADBALANCER_ID: cls.lb_id, const.LOADBALANCER_ID: cls.lb_id,
} }
pool1 = cls.mem_pool_client.create_pool(**pool1_kwargs) pool1 = cls.mem_pool_client.create_pool(**pool1_kwargs)
cls.pool1_id = pool1[const.ID] pool1_id = pool1[const.ID]
cls.addClassResourceCleanup( cls.addClassResourceCleanup(
cls.mem_pool_client.cleanup_pool, cls.mem_pool_client.cleanup_pool, pool1_id,
cls.pool1_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id) lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
@ -79,15 +88,14 @@ class ListenerScenarioTest(test_base.LoadBalancerBaseTest):
pool2_name = data_utils.rand_name("lb_member_pool2_listener") pool2_name = data_utils.rand_name("lb_member_pool2_listener")
pool2_kwargs = { pool2_kwargs = {
const.NAME: pool2_name, const.NAME: pool2_name,
const.PROTOCOL: cls.protocol, const.PROTOCOL: protocol,
const.LB_ALGORITHM: cls.lb_algorithm, const.LB_ALGORITHM: algorithm,
const.LOADBALANCER_ID: cls.lb_id, const.LOADBALANCER_ID: cls.lb_id,
} }
pool2 = cls.mem_pool_client.create_pool(**pool2_kwargs) pool2 = cls.mem_pool_client.create_pool(**pool2_kwargs)
cls.pool2_id = pool2[const.ID] pool2_id = pool2[const.ID]
cls.addClassResourceCleanup( cls.addClassResourceCleanup(
cls.mem_pool_client.cleanup_pool, cls.mem_pool_client.cleanup_pool, pool2_id,
cls.pool2_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id) lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
@ -95,13 +103,128 @@ class ListenerScenarioTest(test_base.LoadBalancerBaseTest):
const.ACTIVE, const.ACTIVE,
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout) CONF.load_balancer.build_timeout)
return pool1_id, pool2_id
cls.allowed_cidrs = ['192.0.1.0/24'] # Note: TERMINATED_HTTPS listeners are covered in a different
if CONF.load_balancer.test_with_ipv6: # tempest scenario suite due to the need for key-manager services
cls.allowed_cidrs = ['2001:db8:a0b:12f0::/64']
@decorators.idempotent_id('ecdd65b0-cf8f-48ee-972b-2f09425472f1')
def test_http_least_connections_listener_CRUD(self):
pool1, pool2 = self._create_pools(const.HTTP,
const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_listener_CRUD(const.HTTP, pool1, pool2)
@decorators.idempotent_id('0681b2ac-8301-4e6c-bf29-b35244864af3')
def test_tcp_least_connections_listener_CRUD(self):
pool1, pool2 = self._create_pools(const.TCP,
const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_listener_CRUD(const.TCP, pool1, pool2)
@decorators.idempotent_id('27a2ba7d-6147-46e4-886a-47c1ba63bf89')
# Skipping due to a status update bug in the amphora driver.
@decorators.skip_because(
bug='2007979',
bug_type='storyboard',
condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
def test_udp_least_connections_listener_CRUD(self):
pool1, pool2 = self._create_pools(const.UDP,
const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_listener_CRUD(const.UDP, pool1, pool2)
@decorators.idempotent_id('4a874014-b7d1-49a4-ac9a-2400b3434700') @decorators.idempotent_id('4a874014-b7d1-49a4-ac9a-2400b3434700')
def test_listener_CRUD(self): def test_http_round_robin_listener_CRUD(self):
pool1, pool2 = self._create_pools(const.HTTP,
const.LB_ALGORITHM_ROUND_ROBIN)
self._test_listener_CRUD(const.HTTP, pool1, pool2)
@decorators.idempotent_id('2b888812-d916-44f0-b620-8d83dbb45975')
def test_tcp_round_robin_listener_CRUD(self):
pool1, pool2 = self._create_pools(const.TCP,
const.LB_ALGORITHM_ROUND_ROBIN)
self._test_listener_CRUD(const.TCP, pool1, pool2)
@decorators.idempotent_id('dd913f74-c6a6-4998-9bed-095babb9cb47')
# Skipping due to a status update bug in the amphora driver.
@decorators.skip_because(
bug='2007979',
bug_type='storyboard',
condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
def test_udp_round_robin_listener_CRUD(self):
pool1, pool2 = self._create_pools(const.UDP,
const.LB_ALGORITHM_ROUND_ROBIN)
self._test_listener_CRUD(const.UDP, pool1, pool2)
@decorators.idempotent_id('b2ae8604-7a4f-477c-9658-fac27734671a')
def test_http_source_ip_listener_CRUD(self):
pool1, pool2 = self._create_pools(const.HTTP,
const.LB_ALGORITHM_SOURCE_IP)
self._test_listener_CRUD(const.HTTP, pool1, pool2)
@decorators.idempotent_id('0ad3fdee-e8c2-4c44-9690-b8a838fbc7a5')
def test_tcp_source_ip_listener_CRUD(self):
pool1, pool2 = self._create_pools(const.TCP,
const.LB_ALGORITHM_SOURCE_IP)
self._test_listener_CRUD(const.TCP, pool1, pool2)
@decorators.idempotent_id('7830aba8-12ca-40d9-9d9b-a63f7a43b287')
# Skipping due to a status update bug in the amphora driver.
@decorators.skip_because(
bug='2007979',
bug_type='storyboard',
condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
def test_udp_source_ip_listener_CRUD(self):
pool1, pool2 = self._create_pools(const.UDP,
const.LB_ALGORITHM_SOURCE_IP)
self._test_listener_CRUD(const.UDP, pool1, pool2)
@decorators.idempotent_id('807a421e-5e99-4556-b0eb-512d39b25eac')
def test_http_source_ip_port_listener_CRUD(self):
try:
pool1, pool2 = self._create_pools(
const.HTTP, const.LB_ALGORITHM_SOURCE_IP_PORT)
self._test_listener_CRUD(const.HTTP, pool1, pool2)
except exceptions.NotImplemented as e:
message = ("The configured provider driver '{driver}' "
"does not support a feature required for this "
"test.".format(driver=CONF.load_balancer.provider))
if hasattr(e, 'resp_body'):
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
@decorators.idempotent_id('6211f8ad-622d-404d-b199-8c2eb55ab340')
def test_tcp_source_ip_port_listener_CRUD(self):
try:
pool1, pool2 = self._create_pools(
const.TCP, const.LB_ALGORITHM_SOURCE_IP_PORT)
self._test_listener_CRUD(const.TCP, pool1, pool2)
except exceptions.NotImplemented as e:
message = ("The configured provider driver '{driver}' "
"does not support a feature required for this "
"test.".format(driver=CONF.load_balancer.provider))
if hasattr(e, 'resp_body'):
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
@decorators.idempotent_id('3f9a2de9-5012-437d-a907-a25e1f68ccfb')
# Skipping due to a status update bug in the amphora driver.
@decorators.skip_because(
bug='2007979',
bug_type='storyboard',
condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
def test_udp_source_ip_port_listener_CRUD(self):
try:
pool1, pool2 = self._create_pools(
const.UDP, const.LB_ALGORITHM_SOURCE_IP_PORT)
self._test_listener_CRUD(const.UDP, pool1, pool2)
except exceptions.NotImplemented as e:
message = ("The configured provider driver '{driver}' "
"does not support a feature required for this "
"test.".format(driver=CONF.load_balancer.provider))
if hasattr(e, 'resp_body'):
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
def _test_listener_CRUD(self, protocol, pool1_id, pool2_id):
"""Tests listener create, read, update, delete """Tests listener create, read, update, delete
* Create a fully populated listener. * Create a fully populated listener.
@ -117,19 +240,23 @@ class ListenerScenarioTest(test_base.LoadBalancerBaseTest):
const.NAME: listener_name, const.NAME: listener_name,
const.DESCRIPTION: listener_description, const.DESCRIPTION: listener_description,
const.ADMIN_STATE_UP: False, const.ADMIN_STATE_UP: False,
const.PROTOCOL: self.protocol, const.PROTOCOL: protocol,
const.PROTOCOL_PORT: 80, const.PROTOCOL_PORT: 80,
const.LOADBALANCER_ID: self.lb_id, const.LOADBALANCER_ID: self.lb_id,
const.CONNECTION_LIMIT: 200, const.CONNECTION_LIMIT: 200,
const.INSERT_HEADERS: { const.DEFAULT_POOL_ID: pool1_id,
const.X_FORWARDED_FOR: "true",
const.X_FORWARDED_PORT: "true"
},
const.DEFAULT_POOL_ID: self.pool1_id,
# TODO(rm_work): need to finish the rest of this stuff # TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_TLS_CONTAINER_REF: '', # const.DEFAULT_TLS_CONTAINER_REF: '',
# const.SNI_CONTAINER_REFS: [], # const.SNI_CONTAINER_REFS: [],
} }
if protocol in [const.HTTP, const.TERMINATED_HTTPS]:
listener_kwargs.update({
const.INSERT_HEADERS: {
const.X_FORWARDED_FOR: "true",
const.X_FORWARDED_PORT: "true"
},
})
if self.mem_listener_client.is_version_supported( if self.mem_listener_client.is_version_supported(
self.api_version, '2.1'): self.api_version, '2.1'):
listener_kwargs.update({ listener_kwargs.update({
@ -168,15 +295,16 @@ class ListenerScenarioTest(test_base.LoadBalancerBaseTest):
UUID(listener[const.ID]) UUID(listener[const.ID])
# Operating status will be OFFLINE while admin_state_up = False # Operating status will be OFFLINE while admin_state_up = False
self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS]) self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
self.assertEqual(self.protocol, listener[const.PROTOCOL]) self.assertEqual(protocol, listener[const.PROTOCOL])
self.assertEqual(80, listener[const.PROTOCOL_PORT]) self.assertEqual(80, listener[const.PROTOCOL_PORT])
self.assertEqual(200, listener[const.CONNECTION_LIMIT]) self.assertEqual(200, listener[const.CONNECTION_LIMIT])
insert_headers = listener[const.INSERT_HEADERS] if protocol in [const.HTTP, const.TERMINATED_HTTPS]:
self.assertTrue( insert_headers = listener[const.INSERT_HEADERS]
strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR])) self.assertTrue(strutils.bool_from_string(
self.assertTrue( insert_headers[const.X_FORWARDED_FOR]))
strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT])) self.assertTrue(strutils.bool_from_string(
self.assertEqual(self.pool1_id, listener[const.DEFAULT_POOL_ID]) insert_headers[const.X_FORWARDED_PORT]))
self.assertEqual(pool1_id, listener[const.DEFAULT_POOL_ID])
if self.mem_listener_client.is_version_supported( if self.mem_listener_client.is_version_supported(
self.api_version, '2.1'): self.api_version, '2.1'):
self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA]) self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
@ -196,15 +324,18 @@ class ListenerScenarioTest(test_base.LoadBalancerBaseTest):
const.DESCRIPTION: new_description, const.DESCRIPTION: new_description,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
const.CONNECTION_LIMIT: 400, const.CONNECTION_LIMIT: 400,
const.INSERT_HEADERS: { const.DEFAULT_POOL_ID: pool2_id,
const.X_FORWARDED_FOR: "false",
const.X_FORWARDED_PORT: "false"
},
const.DEFAULT_POOL_ID: self.pool2_id,
# TODO(rm_work): need to finish the rest of this stuff # TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_TLS_CONTAINER_REF: '', # const.DEFAULT_TLS_CONTAINER_REF: '',
# const.SNI_CONTAINER_REFS: [], # const.SNI_CONTAINER_REFS: [],
} }
if protocol in [const.HTTP, const.TERMINATED_HTTPS]:
listener_update_kwargs.update({
const.INSERT_HEADERS: {
const.X_FORWARDED_FOR: "false",
const.X_FORWARDED_PORT: "false"
},
})
if self.mem_listener_client.is_version_supported( if self.mem_listener_client.is_version_supported(
self.api_version, '2.1'): self.api_version, '2.1'):
listener_update_kwargs.update({ listener_update_kwargs.update({
@ -251,15 +382,16 @@ class ListenerScenarioTest(test_base.LoadBalancerBaseTest):
self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS]) self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
else: else:
self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS]) self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
self.assertEqual(self.protocol, listener[const.PROTOCOL]) self.assertEqual(protocol, listener[const.PROTOCOL])
self.assertEqual(80, listener[const.PROTOCOL_PORT]) self.assertEqual(80, listener[const.PROTOCOL_PORT])
self.assertEqual(400, listener[const.CONNECTION_LIMIT]) self.assertEqual(400, listener[const.CONNECTION_LIMIT])
insert_headers = listener[const.INSERT_HEADERS] if protocol in [const.HTTP, const.TERMINATED_HTTPS]:
self.assertFalse( insert_headers = listener[const.INSERT_HEADERS]
strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR])) self.assertFalse(strutils.bool_from_string(
self.assertFalse( insert_headers[const.X_FORWARDED_FOR]))
strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT])) self.assertFalse(strutils.bool_from_string(
self.assertEqual(self.pool2_id, listener[const.DEFAULT_POOL_ID]) insert_headers[const.X_FORWARDED_PORT]))
self.assertEqual(pool2_id, listener[const.DEFAULT_POOL_ID])
if self.mem_listener_client.is_version_supported( if self.mem_listener_client.is_version_supported(
self.api_version, '2.1'): self.api_version, '2.1'):
self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA]) self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])

View File

@ -22,7 +22,6 @@ from oslo_serialization import jsonutils
from tempest import config from tempest import config
from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import data_utils
from tempest.lib import decorators from tempest.lib import decorators
from tempest.lib import exceptions
from octavia_tempest_plugin.common import constants as const from octavia_tempest_plugin.common import constants as const
from octavia_tempest_plugin.tests import test_base from octavia_tempest_plugin.tests import test_base
@ -77,7 +76,7 @@ class LoadBalancerScenarioTest(test_base.LoadBalancerBaseTest):
cls.lb_admin_flavor_client.cleanup_a_flavor, cls.lb_admin_flavor_client.cleanup_a_flavor,
cls.flavor[const.ID]) cls.flavor[const.ID])
cls.flavor_id = cls.flavor[const.ID] cls.flavor_id = cls.flavor[const.ID]
except exceptions.NotImplemented: except testtools.TestCase.skipException:
LOG.debug("Provider driver %s doesn't support flavors.", LOG.debug("Provider driver %s doesn't support flavors.",
CONF.load_balancer.provider) CONF.load_balancer.provider)
cls.flavor_profile = None cls.flavor_profile = None

View File

@ -18,7 +18,9 @@ from uuid import UUID
from dateutil import parser from dateutil import parser
from tempest import config from tempest import config
from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import misc
from tempest.lib import decorators from tempest.lib import decorators
from tempest.lib import exceptions
from octavia_tempest_plugin.common import constants as const from octavia_tempest_plugin.common import constants as const
from octavia_tempest_plugin.tests import test_base from octavia_tempest_plugin.tests import test_base
@ -27,6 +29,17 @@ from octavia_tempest_plugin.tests import waiters
CONF = config.CONF CONF = config.CONF
# Member port numbers need to be unique on the shared pools so generate them
@misc.singleton
class MemberPort(object):
current_port = 8000
def increment(self):
self.current_port += 1
return self.current_port
class MemberScenarioTest(test_base.LoadBalancerBaseTest): class MemberScenarioTest(test_base.LoadBalancerBaseTest):
member_address = '2001:db8:0:0:0:0:0:1' member_address = '2001:db8:0:0:0:0:0:1'
@ -43,6 +56,10 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
cls._setup_lb_network_kwargs(lb_kwargs, cls._setup_lb_network_kwargs(lb_kwargs,
ip_version=4) ip_version=4)
cls.current_listener_port = 8000
cls.listener_pool_cache = {}
cls.member_port = MemberPort()
lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs) lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
cls.lb_id = lb[const.ID] cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup( cls.addClassResourceCleanup(
@ -55,45 +72,43 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
CONF.load_balancer.lb_build_interval, CONF.load_balancer.lb_build_interval,
CONF.load_balancer.lb_build_timeout) CONF.load_balancer.lb_build_timeout)
# Per protocol listeners and pools IDs
cls.listener_ids = {}
cls.pool_ids = {}
cls.protocol = const.HTTP
lb_feature_enabled = CONF.loadbalancer_feature_enabled
if not lb_feature_enabled.l7_protocol_enabled:
cls.protocol = lb_feature_enabled.l4_protocol
# Don't use same ports for HTTP/l4_protocol and UDP since some previous
# releases (<=train) don't support it
cls._listener_pool_create(cls.protocol, 80)
cls._listener_pool_create(const.UDP, 8080)
@classmethod @classmethod
def _listener_pool_create(cls, protocol, protocol_port): def _listener_pool_create(cls, listener_protocol, pool_protocol,
algorithm):
"""Setup resources needed by the tests.""" """Setup resources needed by the tests."""
if (algorithm == const.LB_ALGORITHM_SOURCE_IP_PORT and not
if (protocol == const.UDP and cls.mem_listener_client.is_version_supported(
cls.api_version, '2.13')):
raise testtools.TestCase.skipException(
'Skipping this test as load balancing algorithm '
'SOURCE_IP_PORT requires API version 2.13 or newer.')
if (listener_protocol == const.UDP and
not cls.mem_listener_client.is_version_supported( not cls.mem_listener_client.is_version_supported(
cls.api_version, '2.1')): cls.api_version, '2.1')):
return raise cls.skipException('UDP listener support is only available '
'in Octavia API version 2.1 or newer')
# Cache listener/pool combinations we have already created as
# they can be reused for member test permutations
listener_pool_key = listener_protocol + pool_protocol + algorithm
pool_id = cls.listener_pool_cache.get(listener_pool_key, None)
if pool_id is not None:
return pool_id
listener_name = data_utils.rand_name("lb_member_listener1_member") listener_name = data_utils.rand_name("lb_member_listener1_member")
listener_kwargs = { listener_kwargs = {
const.NAME: listener_name, const.NAME: listener_name,
const.PROTOCOL: protocol, const.PROTOCOL: listener_protocol,
const.PROTOCOL_PORT: protocol_port, const.PROTOCOL_PORT: cls.current_listener_port,
const.LOADBALANCER_ID: cls.lb_id, const.LOADBALANCER_ID: cls.lb_id,
# For branches that don't support multiple listeners in single # For branches that don't support multiple listeners in single
# haproxy process and use haproxy>=1.8: # haproxy process and use haproxy>=1.8:
const.CONNECTION_LIMIT: 200, const.CONNECTION_LIMIT: 200,
} }
cls.current_listener_port += 1
listener = cls.mem_listener_client.create_listener(**listener_kwargs) listener = cls.mem_listener_client.create_listener(**listener_kwargs)
cls.listener_ids[protocol] = listener[const.ID]
cls.addClassResourceCleanup( cls.addClassResourceCleanup(
cls.mem_listener_client.cleanup_listener, cls.mem_listener_client.cleanup_listener, listener[const.ID],
cls.listener_ids[protocol],
lb_client=cls.mem_lb_client, lb_id=cls.lb_id) lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
@ -105,15 +120,24 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
pool_name = data_utils.rand_name("lb_member_pool1_member") pool_name = data_utils.rand_name("lb_member_pool1_member")
pool_kwargs = { pool_kwargs = {
const.NAME: pool_name, const.NAME: pool_name,
const.PROTOCOL: protocol, const.PROTOCOL: pool_protocol,
const.LB_ALGORITHM: cls.lb_algorithm, const.LB_ALGORITHM: algorithm,
const.LISTENER_ID: cls.listener_ids[protocol], const.LISTENER_ID: listener[const.ID],
} }
pool = cls.mem_pool_client.create_pool(**pool_kwargs) try:
cls.pool_ids[protocol] = pool[const.ID] pool = cls.mem_pool_client.create_pool(**pool_kwargs)
except exceptions.NotImplemented as e:
if algorithm != const.LB_ALGORITHM_SOURCE_IP_PORT:
raise
message = ("The configured provider driver '{driver}' "
"does not support a feature required for this "
"test.".format(driver=CONF.load_balancer.provider))
if hasattr(e, 'resp_body'):
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
cls.addClassResourceCleanup( cls.addClassResourceCleanup(
cls.mem_pool_client.cleanup_pool, cls.mem_pool_client.cleanup_pool, pool[const.ID],
cls.pool_ids[protocol],
lb_client=cls.mem_lb_client, lb_id=cls.lb_id) lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
@ -121,9 +145,250 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
const.ACTIVE, const.ACTIVE,
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout) CONF.load_balancer.build_timeout)
cls.listener_pool_cache[listener_pool_key] = pool[const.ID]
return pool[const.ID]
@decorators.idempotent_id('33abafca-ce57-479e-8480-843ef412d6a6')
def test_HTTP_LC_member_crud(self):
pool_id = self._listener_pool_create(
const.HTTP, const.HTTP, const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_member_CRUD(pool_id)
@decorators.idempotent_id('eab4eb32-b26f-4fe1-a606-1574b5b6182c')
def test_HTTP_LC_alt_monitor_member_crud(self):
pool_id = self._listener_pool_create(
const.HTTP, const.HTTP, const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_member_CRUD(pool_id, alternate_monitoring=True)
@decorators.idempotent_id('80355701-bc68-4cba-a9b3-4f35fc192b6a')
def test_HTTPS_LC_member_crud(self):
pool_id = self._listener_pool_create(
const.HTTPS, const.HTTPS, const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_member_CRUD(pool_id)
@decorators.idempotent_id('6f8fce94-b2aa-4497-b80f-74293d977d25')
def test_HTTPS_LC_alt_monitor_member_crud(self):
pool_id = self._listener_pool_create(
const.HTTPS, const.HTTPS, const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_member_CRUD(pool_id, alternate_monitoring=True)
@decorators.idempotent_id('0e45c423-db43-4fee-8442-d9daabe6b2aa')
def test_PROXY_LC_member_crud(self):
pool_id = self._listener_pool_create(
const.TCP, const.PROXY, const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_member_CRUD(pool_id)
@decorators.idempotent_id('3ea2aad1-5650-4ec6-8394-501de33cce70')
def test_PROXY_LC_alt_monitor_member_crud(self):
pool_id = self._listener_pool_create(
const.TCP, const.PROXY, const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_member_CRUD(pool_id, alternate_monitoring=True)
@decorators.idempotent_id('9b2e7e2d-776b-419c-9717-ab4fef9cd5ca')
def test_TCP_LC_member_crud(self):
pool_id = self._listener_pool_create(
const.TCP, const.TCP, const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_member_CRUD(pool_id)
@decorators.idempotent_id('06b95367-dc81-41e5-9a53-981833fb2979')
def test_TCP_LC_alt_monitor_member_crud(self):
pool_id = self._listener_pool_create(
const.TCP, const.TCP, const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_member_CRUD(pool_id, alternate_monitoring=True)
@decorators.idempotent_id('c70bd8c6-0f6a-4ee7-840f-a3355aefd471')
def test_UDP_LC_member_crud(self):
pool_id = self._listener_pool_create(
const.UDP, const.UDP, const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_member_CRUD(pool_id)
@decorators.idempotent_id('0b4ec248-c6a0-4d29-b77e-189453ec0535')
def test_UDP_LC_alt_monitor_member_crud(self):
pool_id = self._listener_pool_create(
const.UDP, const.UDP, const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_member_CRUD(pool_id, alternate_monitoring=True)
@decorators.idempotent_id('15c8c3e3-569c-4029-95df-a9f72049e267')
def test_HTTP_RR_member_crud(self):
pool_id = self._listener_pool_create(
const.HTTP, const.HTTP, const.LB_ALGORITHM_ROUND_ROBIN)
self._test_member_CRUD(pool_id)
# Member CRUD scenarios continued: ROUND_ROBIN (RR), SOURCE_IP (SI) and
# SOURCE_IP_PORT (SIP) algorithms, one test per protocol pairing.
@decorators.idempotent_id('a0f02494-ffb3-47be-8670-f56c0df9ec94')
def test_HTTP_RR_alt_monitor_member_crud(self):
    """Member CRUD: HTTP pool, ROUND_ROBIN, alternate monitor."""
    self._test_member_CRUD(
        self._listener_pool_create(const.HTTP, const.HTTP,
                                   const.LB_ALGORITHM_ROUND_ROBIN),
        alternate_monitoring=True)
@decorators.idempotent_id('57aee0db-3295-42b7-a7d3-aae942a6cb41')
def test_HTTPS_RR_member_crud(self):
    """Member CRUD: HTTPS pool, ROUND_ROBIN."""
    self._test_member_CRUD(
        self._listener_pool_create(const.HTTPS, const.HTTPS,
                                   const.LB_ALGORITHM_ROUND_ROBIN))
@decorators.idempotent_id('6c3e5bd7-4573-4f6d-ac64-31b238c9ea51')
def test_HTTPS_RR_alt_monitor_member_crud(self):
    """Member CRUD: HTTPS pool, ROUND_ROBIN, alternate monitor."""
    self._test_member_CRUD(
        self._listener_pool_create(const.HTTPS, const.HTTPS,
                                   const.LB_ALGORITHM_ROUND_ROBIN),
        alternate_monitoring=True)
@decorators.idempotent_id('e0ad1fa0-1fdb-472d-9d69-8968631c9239')
def test_PROXY_RR_member_crud(self):
    """Member CRUD: PROXY pool behind a TCP listener, ROUND_ROBIN."""
    self._test_member_CRUD(
        self._listener_pool_create(const.TCP, const.PROXY,
                                   const.LB_ALGORITHM_ROUND_ROBIN))
@decorators.idempotent_id('534fbc38-1c70-4c67-8f89-74a6905b1c98')
def test_PROXY_RR_alt_monitor_member_crud(self):
    """Member CRUD: PROXY pool, ROUND_ROBIN, alternate monitor."""
    self._test_member_CRUD(
        self._listener_pool_create(const.TCP, const.PROXY,
                                   const.LB_ALGORITHM_ROUND_ROBIN),
        alternate_monitoring=True)
@decorators.idempotent_id('c4c72e4b-5abe-41df-9f1d-6a8a27c75a80')
def test_TCP_RR_member_crud(self):
    """Member CRUD: TCP pool, ROUND_ROBIN."""
    self._test_member_CRUD(
        self._listener_pool_create(const.TCP, const.TCP,
                                   const.LB_ALGORITHM_ROUND_ROBIN))
@decorators.idempotent_id('673425e0-2a57-4c92-a416-7b4e0824708f')
def test_TCP_RR_alt_monitor_member_crud(self):
    """Member CRUD: TCP pool, ROUND_ROBIN, alternate monitor."""
    self._test_member_CRUD(
        self._listener_pool_create(const.TCP, const.TCP,
                                   const.LB_ALGORITHM_ROUND_ROBIN),
        alternate_monitoring=True)
@decorators.idempotent_id('f08c9efc-b69c-4c0f-a731-74ec8c17fc91')
def test_UDP_RR_member_crud(self):
    """Member CRUD: UDP pool, ROUND_ROBIN."""
    self._test_member_CRUD(
        self._listener_pool_create(const.UDP, const.UDP,
                                   const.LB_ALGORITHM_ROUND_ROBIN))
@decorators.idempotent_id('94829e1e-506e-4f3c-ab04-4e338787ccfd')
def test_UDP_RR_alt_monitor_member_crud(self):
    """Member CRUD: UDP pool, ROUND_ROBIN, alternate monitor."""
    self._test_member_CRUD(
        self._listener_pool_create(const.UDP, const.UDP,
                                   const.LB_ALGORITHM_ROUND_ROBIN),
        alternate_monitoring=True)
# SOURCE_IP algorithm variants.
@decorators.idempotent_id('07d1e571-d12c-4e04-90d1-8f4f42610df3')
def test_HTTP_SI_member_crud(self):
    """Member CRUD: HTTP pool, SOURCE_IP."""
    self._test_member_CRUD(
        self._listener_pool_create(const.HTTP, const.HTTP,
                                   const.LB_ALGORITHM_SOURCE_IP))
@decorators.idempotent_id('3910a7ec-63c5-4152-9fe1-ce21d3e1cdca')
def test_HTTP_SI_alt_monitor_member_crud(self):
    """Member CRUD: HTTP pool, SOURCE_IP, alternate monitor."""
    self._test_member_CRUD(
        self._listener_pool_create(const.HTTP, const.HTTP,
                                   const.LB_ALGORITHM_SOURCE_IP),
        alternate_monitoring=True)
@decorators.idempotent_id('32b0b541-29dc-464b-91c1-115413539de7')
def test_HTTPS_SI_member_crud(self):
    """Member CRUD: HTTPS pool, SOURCE_IP."""
    self._test_member_CRUD(
        self._listener_pool_create(const.HTTPS, const.HTTPS,
                                   const.LB_ALGORITHM_SOURCE_IP))
@decorators.idempotent_id('d59ea523-8dac-4e19-8df4-a7076a17296c')
def test_HTTPS_SI_alt_monitor_member_crud(self):
    """Member CRUD: HTTPS pool, SOURCE_IP, alternate monitor."""
    self._test_member_CRUD(
        self._listener_pool_create(const.HTTPS, const.HTTPS,
                                   const.LB_ALGORITHM_SOURCE_IP),
        alternate_monitoring=True)
@decorators.idempotent_id('12348506-1cfc-4d62-9cc2-d380776a9154')
def test_PROXY_SI_member_crud(self):
    """Member CRUD: PROXY pool behind a TCP listener, SOURCE_IP."""
    self._test_member_CRUD(
        self._listener_pool_create(const.TCP, const.PROXY,
                                   const.LB_ALGORITHM_SOURCE_IP))
@decorators.idempotent_id('5d3879a6-d103-4800-bca4-1ef18ecbee68')
def test_PROXY_SI_alt_monitor_member_crud(self):
    """Member CRUD: PROXY pool, SOURCE_IP, alternate monitor."""
    self._test_member_CRUD(
        self._listener_pool_create(const.TCP, const.PROXY,
                                   const.LB_ALGORITHM_SOURCE_IP),
        alternate_monitoring=True)
@decorators.idempotent_id('efb158e2-de75-4d8b-8566-a0fa5fd75173')
def test_TCP_SI_member_crud(self):
    """Member CRUD: TCP pool, SOURCE_IP."""
    self._test_member_CRUD(
        self._listener_pool_create(const.TCP, const.TCP,
                                   const.LB_ALGORITHM_SOURCE_IP))
@decorators.idempotent_id('4f1661e5-1dff-4910-9ecd-96327ea3e873')
def test_TCP_SI_alt_monitor_member_crud(self):
    """Member CRUD: TCP pool, SOURCE_IP, alternate monitor."""
    self._test_member_CRUD(
        self._listener_pool_create(const.TCP, const.TCP,
                                   const.LB_ALGORITHM_SOURCE_IP),
        alternate_monitoring=True)
@decorators.idempotent_id('0984583b-daaf-4509-bf1f-ff3acf33836b')
def test_UDP_SI_member_crud(self):
    """Member CRUD: UDP pool, SOURCE_IP."""
    self._test_member_CRUD(
        self._listener_pool_create(const.UDP, const.UDP,
                                   const.LB_ALGORITHM_SOURCE_IP))
@decorators.idempotent_id('16b84495-e8f8-4e7b-b242-43a6e00fb8ad')
def test_UDP_SI_alt_monitor_member_crud(self):
    """Member CRUD: UDP pool, SOURCE_IP, alternate monitor."""
    self._test_member_CRUD(
        self._listener_pool_create(const.UDP, const.UDP,
                                   const.LB_ALGORITHM_SOURCE_IP),
        alternate_monitoring=True)
# SOURCE_IP_PORT algorithm variants.
@decorators.idempotent_id('ab8f46fe-0c84-4755-a9a2-80cc1fbdea18')
def test_HTTP_SIP_member_crud(self):
    """Member CRUD: HTTP pool, SOURCE_IP_PORT."""
    self._test_member_CRUD(
        self._listener_pool_create(const.HTTP, const.HTTP,
                                   const.LB_ALGORITHM_SOURCE_IP_PORT))
@decorators.idempotent_id('7470bea5-9ea0-4e04-a82f-a0bed202b97d')
def test_HTTP_SIP_alt_monitor_member_crud(self):
    """Member CRUD: HTTP pool, SOURCE_IP_PORT, alternate monitor."""
    self._test_member_CRUD(
        self._listener_pool_create(const.HTTP, const.HTTP,
                                   const.LB_ALGORITHM_SOURCE_IP_PORT),
        alternate_monitoring=True)
@decorators.idempotent_id('e59e9a7d-b6e7-43e9-b9d5-0717f113d769')
def test_HTTPS_SIP_member_crud(self):
    """Member CRUD: HTTPS pool, SOURCE_IP_PORT."""
    self._test_member_CRUD(
        self._listener_pool_create(const.HTTPS, const.HTTPS,
                                   const.LB_ALGORITHM_SOURCE_IP_PORT))
@decorators.idempotent_id('761e1acd-3f4c-4e02-89e1-f89adfe2e3f9')
def test_HTTPS_SIP_alt_monitor_member_crud(self):
    """Member CRUD: HTTPS pool, SOURCE_IP_PORT, alternate monitor."""
    self._test_member_CRUD(
        self._listener_pool_create(const.HTTPS, const.HTTPS,
                                   const.LB_ALGORITHM_SOURCE_IP_PORT),
        alternate_monitoring=True)
@decorators.idempotent_id('ba7b0c73-df44-4a1a-a610-a107daabc36d')
def test_PROXY_SIP_member_crud(self):
    """Member CRUD: PROXY pool behind a TCP listener, SOURCE_IP_PORT."""
    self._test_member_CRUD(
        self._listener_pool_create(const.TCP, const.PROXY,
                                   const.LB_ALGORITHM_SOURCE_IP_PORT))
@decorators.idempotent_id('ad43bc3f-2664-42c4-999f-9763facb8d15')
def test_PROXY_SIP_alt_monitor_member_crud(self):
    """Member CRUD: PROXY pool, SOURCE_IP_PORT, alternate monitor."""
    self._test_member_CRUD(
        self._listener_pool_create(const.TCP, const.PROXY,
                                   const.LB_ALGORITHM_SOURCE_IP_PORT),
        alternate_monitoring=True)
@decorators.idempotent_id('3341d05c-c199-496f-ac40-6248818ce831')
def test_TCP_SIP_member_crud(self):
    """Member CRUD: TCP pool, SOURCE_IP_PORT."""
    self._test_member_CRUD(
        self._listener_pool_create(const.TCP, const.TCP,
                                   const.LB_ALGORITHM_SOURCE_IP_PORT))
@decorators.idempotent_id('5872f1de-1a33-4c20-bc02-7d058e3c3b55')
def test_TCP_SIP_alt_monitor_member_crud(self):
    """Member CRUD: TCP pool, SOURCE_IP_PORT, alternate monitor."""
    self._test_member_CRUD(
        self._listener_pool_create(const.TCP, const.TCP,
                                   const.LB_ALGORITHM_SOURCE_IP_PORT),
        alternate_monitoring=True)
@decorators.idempotent_id('9550835b-c9ef-44e3-8087-151c25a95168')
def test_UDP_SIP_member_crud(self):
    """Member CRUD: UDP pool, SOURCE_IP_PORT."""
    self._test_member_CRUD(
        self._listener_pool_create(const.UDP, const.UDP,
                                   const.LB_ALGORITHM_SOURCE_IP_PORT))
@decorators.idempotent_id('5f40b080-0f2c-4791-a509-da7cfe9eace4')
def test_UDP_SIP_alt_monitor_member_crud(self):
    """Member CRUD: UDP pool, SOURCE_IP_PORT, alternate monitor."""
    self._test_member_CRUD(
        self._listener_pool_create(const.UDP, const.UDP,
                                   const.LB_ALGORITHM_SOURCE_IP_PORT),
        alternate_monitoring=True)
def _test_member_CRUD(self, pool_id, alternate_monitoring=False):
"""Tests member create, read, update, delete """Tests member create, read, update, delete
* Create a fully populated member. * Create a fully populated member.
@ -137,13 +402,15 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
member_kwargs = { member_kwargs = {
const.NAME: member_name, const.NAME: member_name,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
const.POOL_ID: self.pool_ids[self.protocol], const.POOL_ID: pool_id,
const.ADDRESS: '192.0.2.1', const.ADDRESS: '192.0.2.1',
const.PROTOCOL_PORT: 80, const.PROTOCOL_PORT: 80,
const.WEIGHT: 50, const.WEIGHT: 50,
const.MONITOR_ADDRESS: '192.0.2.2',
const.MONITOR_PORT: 8080,
} }
if alternate_monitoring:
member_kwargs[const.MONITOR_ADDRESS] = '192.0.2.2'
member_kwargs[const.MONITOR_PORT] = 8080
if self.mem_member_client.is_version_supported( if self.mem_member_client.is_version_supported(
self.api_version, '2.1'): self.api_version, '2.1'):
member_kwargs.update({ member_kwargs.update({
@ -153,14 +420,11 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
if self.lb_member_vip_subnet: if self.lb_member_vip_subnet:
member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[ member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
const.ID] const.ID]
hm_enabled = CONF.loadbalancer_feature_enabled.health_monitor_enabled
if not hm_enabled:
del member_kwargs[const.MONITOR_ADDRESS]
del member_kwargs[const.MONITOR_PORT]
member = self.mem_member_client.create_member(**member_kwargs) member = self.mem_member_client.create_member(**member_kwargs)
self.addCleanup( self.addCleanup(
self.mem_member_client.cleanup_member, self.mem_member_client.cleanup_member,
member[const.ID], pool_id=self.pool_ids[self.protocol], member[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id) lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status( waiters.wait_for_status(
@ -174,7 +438,7 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
const.ACTIVE, const.ACTIVE,
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout, CONF.load_balancer.build_timeout,
pool_id=self.pool_ids[self.protocol]) pool_id=pool_id)
parser.parse(member[const.CREATED_AT]) parser.parse(member[const.CREATED_AT])
parser.parse(member[const.UPDATED_AT]) parser.parse(member[const.UPDATED_AT])
@ -189,12 +453,13 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
const.NO_MONITOR, const.NO_MONITOR,
CONF.load_balancer.check_interval, CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout, CONF.load_balancer.check_timeout,
pool_id=self.pool_ids[self.protocol]) pool_id=pool_id)
equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS, equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
const.PROTOCOL_PORT, const.WEIGHT] const.PROTOCOL_PORT, const.WEIGHT]
if hm_enabled: if alternate_monitoring:
equal_items += [const.MONITOR_ADDRESS, const.MONITOR_PORT] equal_items += [const.MONITOR_ADDRESS, const.MONITOR_PORT]
if self.mem_member_client.is_version_supported( if self.mem_member_client.is_version_supported(
self.api_version, '2.1'): self.api_version, '2.1'):
equal_items.append(const.BACKUP) equal_items.append(const.BACKUP)
@ -221,7 +486,7 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
const.BACKUP: not member[const.BACKUP], const.BACKUP: not member[const.BACKUP],
}) })
if hm_enabled: if alternate_monitoring:
member_update_kwargs[const.MONITOR_ADDRESS] = '192.0.2.3' member_update_kwargs[const.MONITOR_ADDRESS] = '192.0.2.3'
member_update_kwargs[const.MONITOR_PORT] = member[ member_update_kwargs[const.MONITOR_PORT] = member[
const.MONITOR_PORT] + 1 const.MONITOR_PORT] + 1
@ -239,11 +504,11 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
const.ACTIVE, const.ACTIVE,
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout, CONF.load_balancer.build_timeout,
pool_id=self.pool_ids[self.protocol]) pool_id=pool_id)
# Test changed items # Test changed items
equal_items = [const.NAME, const.ADMIN_STATE_UP, const.WEIGHT] equal_items = [const.NAME, const.ADMIN_STATE_UP, const.WEIGHT]
if hm_enabled: if alternate_monitoring:
equal_items += [const.MONITOR_ADDRESS, const.MONITOR_PORT] equal_items += [const.MONITOR_ADDRESS, const.MONITOR_PORT]
if self.mem_member_client.is_version_supported( if self.mem_member_client.is_version_supported(
self.api_version, '2.1'): self.api_version, '2.1'):
@ -271,14 +536,14 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
CONF.load_balancer.check_timeout) CONF.load_balancer.check_timeout)
self.mem_member_client.delete_member( self.mem_member_client.delete_member(
member[const.ID], member[const.ID],
pool_id=self.pool_ids[self.protocol]) pool_id=pool_id)
waiters.wait_for_deleted_status_or_not_found( waiters.wait_for_deleted_status_or_not_found(
self.mem_member_client.show_member, member[const.ID], self.mem_member_client.show_member, member[const.ID],
const.PROVISIONING_STATUS, const.PROVISIONING_STATUS,
CONF.load_balancer.check_interval, CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout, CONF.load_balancer.check_timeout,
pool_id=self.pool_ids[self.protocol]) pool_id=pool_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.mem_lb_client.show_loadbalancer,
@ -287,12 +552,12 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
CONF.load_balancer.check_interval, CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout) CONF.load_balancer.check_timeout)
def _test_mixed_member_create(self, protocol): def _test_mixed_member_create(self, pool_id):
member_name = data_utils.rand_name("lb_member_member1-create") member_name = data_utils.rand_name("lb_member_member1-create")
member_kwargs = { member_kwargs = {
const.NAME: member_name, const.NAME: member_name,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
const.POOL_ID: self.pool_ids[protocol], const.POOL_ID: pool_id,
const.ADDRESS: self.member_address, const.ADDRESS: self.member_address,
const.PROTOCOL_PORT: 80, const.PROTOCOL_PORT: 80,
const.WEIGHT: 50, const.WEIGHT: 50,
@ -306,7 +571,7 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
**member_kwargs) **member_kwargs)
self.addCleanup( self.addCleanup(
self.mem_member_client.cleanup_member, self.mem_member_client.cleanup_member,
member[const.ID], pool_id=self.pool_ids[protocol], member[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id) lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id, self.mem_lb_client.show_loadbalancer, self.lb_id,
@ -314,6 +579,98 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
CONF.load_balancer.check_interval, CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout) CONF.load_balancer.check_timeout)
# Mixed IPv4/IPv6 member-create scenarios (IPv6 members behind the VIP),
# LEAST_CONNECTIONS and ROUND_ROBIN algorithms. All require IPv6 testing
# to be enabled in the tempest configuration.
@decorators.idempotent_id('f9bc8ef1-cf21-41e5-819d-7561173e5286')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
def test_mixed_HTTP_LC_member_create(self):
    """Mixed IP member create: HTTP pool, LEAST_CONNECTIONS."""
    self._test_mixed_member_create(self._listener_pool_create(
        const.HTTP, const.HTTP, const.LB_ALGORITHM_LEAST_CONNECTIONS))
@decorators.idempotent_id('e63c89a7-30a3-4eff-8ff5-dd62a5ecec0f')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
def test_mixed_HTTPS_LC_member_create(self):
    """Mixed IP member create: HTTPS pool, LEAST_CONNECTIONS."""
    self._test_mixed_member_create(self._listener_pool_create(
        const.HTTPS, const.HTTPS, const.LB_ALGORITHM_LEAST_CONNECTIONS))
@decorators.idempotent_id('efaa9ed0-c261-4184-9693-0020965606a8')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
def test_mixed_PROXY_LC_member_create(self):
    """Mixed IP member create: PROXY pool, LEAST_CONNECTIONS."""
    self._test_mixed_member_create(self._listener_pool_create(
        const.TCP, const.PROXY, const.LB_ALGORITHM_LEAST_CONNECTIONS))
@decorators.idempotent_id('f4ac056c-2cb8-457f-b1b1-9b49226f9b9f')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
def test_mixed_TCP_LC_member_create(self):
    """Mixed IP member create: TCP pool, LEAST_CONNECTIONS."""
    self._test_mixed_member_create(self._listener_pool_create(
        const.TCP, const.TCP, const.LB_ALGORITHM_LEAST_CONNECTIONS))
@decorators.idempotent_id('90e22b80-d52b-4af2-9c4d-9be44eed9575')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
# NOTE: skipped on the amphora driver until "UDP load balancers cannot
# mix protocol versions" (storyboard story 2003329) is fixed.
@decorators.skip_because(
    bug='2003329',
    bug_type='storyboard',
    condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
def test_mixed_UDP_LC_member_create(self):
    """Mixed IP member create: UDP pool, LEAST_CONNECTIONS."""
    udp_supported = self.mem_listener_client.is_version_supported(
        self.api_version, '2.1')
    if not udp_supported:
        raise self.skipException('UDP listener support is only available '
                                 'in Octavia API version 2.1 or newer')
    self._test_mixed_member_create(self._listener_pool_create(
        const.UDP, const.UDP, const.LB_ALGORITHM_LEAST_CONNECTIONS))
@decorators.idempotent_id('b8afb91d-9b85-4569-85c7-03453df8990b')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
def test_mixed_HTTP_RR_member_create(self):
    """Mixed IP member create: HTTP pool, ROUND_ROBIN."""
    self._test_mixed_member_create(self._listener_pool_create(
        const.HTTP, const.HTTP, const.LB_ALGORITHM_ROUND_ROBIN))
@decorators.idempotent_id('a64dc345-4afe-4a2c-8a6a-178dd5a94670')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
def test_mixed_HTTPS_RR_member_create(self):
    """Mixed IP member create: HTTPS pool, ROUND_ROBIN."""
    self._test_mixed_member_create(self._listener_pool_create(
        const.HTTPS, const.HTTPS, const.LB_ALGORITHM_ROUND_ROBIN))
@decorators.idempotent_id('909aebf2-f9e4-4b96-943e-c02b8a415cd2')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
def test_mixed_PROXY_RR_member_create(self):
    """Mixed IP member create: PROXY pool, ROUND_ROBIN."""
    self._test_mixed_member_create(self._listener_pool_create(
        const.TCP, const.PROXY, const.LB_ALGORITHM_ROUND_ROBIN))
@decorators.idempotent_id('407ff3d4-f0a2-4d27-be69-3f2ec039a6a0')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
def test_mixed_TCP_RR_member_create(self):
    """Mixed IP member create: TCP pool, ROUND_ROBIN."""
    self._test_mixed_member_create(self._listener_pool_create(
        const.TCP, const.TCP, const.LB_ALGORITHM_ROUND_ROBIN))
@decorators.idempotent_id('0623aa1f-753d-44e7-afa1-017d274eace7') @decorators.idempotent_id('0623aa1f-753d-44e7-afa1-017d274eace7')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6, @testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
'IPv6 testing is disabled') 'IPv6 testing is disabled')
@ -324,19 +681,124 @@ class MemberScenarioTest(test_base.LoadBalancerBaseTest):
bug='2003329', bug='2003329',
bug_type='storyboard', bug_type='storyboard',
condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS) condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
def test_mixed_udp_member_create(self): def test_mixed_UDP_RR_member_create(self):
"""Test the member creation with mixed IP protocol members/VIP.""" """Test the member creation with mixed IP protocol members/VIP."""
if not self.mem_listener_client.is_version_supported( if not self.mem_listener_client.is_version_supported(
self.api_version, '2.1'): self.api_version, '2.1'):
raise self.skipException('UDP listener support is only available ' raise self.skipException('UDP listener support is only available '
'in Octavia API version 2.1 or newer') 'in Octavia API version 2.1 or newer')
pool_id = self._listener_pool_create(
const.UDP, const.UDP, const.LB_ALGORITHM_ROUND_ROBIN)
self._test_mixed_member_create(pool_id)
self._test_mixed_member_create(const.UDP) @decorators.idempotent_id('cc7f9272-84a6-436c-a529-171b67a45b62')
@decorators.idempotent_id('b8afb91d-9b85-4569-85c7-03453df8990b')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6, @testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
'IPv6 testing is disabled') 'IPv6 testing is disabled')
def test_mixed_member_create(self): def test_mixed_HTTP_SI_member_create(self):
"""Test the member creation with mixed IP protocol members/VIP.""" """Test the member creation with mixed IP protocol members/VIP."""
self._test_mixed_member_create(self.protocol) pool_id = self._listener_pool_create(
const.HTTP, const.HTTP, const.LB_ALGORITHM_SOURCE_IP)
self._test_mixed_member_create(pool_id)
# Mixed IPv4/IPv6 member-create scenarios continued: SOURCE_IP and
# SOURCE_IP_PORT algorithms.
@decorators.idempotent_id('704a10ed-d52d-4c75-9445-9ef98f7f540f')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
def test_mixed_HTTPS_SI_member_create(self):
    """Mixed IP member create: HTTPS pool, SOURCE_IP."""
    self._test_mixed_member_create(self._listener_pool_create(
        const.HTTPS, const.HTTPS, const.LB_ALGORITHM_SOURCE_IP))
@decorators.idempotent_id('4c516b5b-eb7b-4a4c-9a73-fba823332e25')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
def test_mixed_PROXY_SI_member_create(self):
    """Mixed IP member create: PROXY pool, SOURCE_IP."""
    self._test_mixed_member_create(self._listener_pool_create(
        const.TCP, const.PROXY, const.LB_ALGORITHM_SOURCE_IP))
@decorators.idempotent_id('61973bc8-8bc4-4aec-bf57-b37583887544')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
def test_mixed_TCP_SI_member_create(self):
    """Mixed IP member create: TCP pool, SOURCE_IP."""
    self._test_mixed_member_create(self._listener_pool_create(
        const.TCP, const.TCP, const.LB_ALGORITHM_SOURCE_IP))
@decorators.idempotent_id('ddab1836-ba9f-42e5-9630-1572d4a63501')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
# NOTE: skipped on the amphora driver until "UDP load balancers cannot
# mix protocol versions" (storyboard story 2003329) is fixed.
@decorators.skip_because(
    bug='2003329',
    bug_type='storyboard',
    condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
def test_mixed_UDP_SI_member_create(self):
    """Mixed IP member create: UDP pool, SOURCE_IP."""
    udp_supported = self.mem_listener_client.is_version_supported(
        self.api_version, '2.1')
    if not udp_supported:
        raise self.skipException('UDP listener support is only available '
                                 'in Octavia API version 2.1 or newer')
    self._test_mixed_member_create(self._listener_pool_create(
        const.UDP, const.UDP, const.LB_ALGORITHM_SOURCE_IP))
@decorators.idempotent_id('b3dc557a-88ec-4bc6-84fd-c3aaab5d5920')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
def test_mixed_HTTP_SIP_member_create(self):
    """Mixed IP member create: HTTP pool, SOURCE_IP_PORT."""
    self._test_mixed_member_create(self._listener_pool_create(
        const.HTTP, const.HTTP, const.LB_ALGORITHM_SOURCE_IP_PORT))
@decorators.idempotent_id('d6f3908d-470a-4939-b407-c6d6324c06b6')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
def test_mixed_HTTPS_SIP_member_create(self):
    """Mixed IP member create: HTTPS pool, SOURCE_IP_PORT."""
    self._test_mixed_member_create(self._listener_pool_create(
        const.HTTPS, const.HTTPS, const.LB_ALGORITHM_SOURCE_IP_PORT))
@decorators.idempotent_id('ab745620-bf92-49e1-ac35-e42f266a7612')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
def test_mixed_PROXY_SIP_member_create(self):
    """Mixed IP member create: PROXY pool, SOURCE_IP_PORT."""
    self._test_mixed_member_create(self._listener_pool_create(
        const.TCP, const.PROXY, const.LB_ALGORITHM_SOURCE_IP_PORT))
@decorators.idempotent_id('c7ffbd6e-5d9f-45e8-a5d0-2d26ea6b0ed0')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
def test_mixed_TCP_SIP_member_create(self):
    """Mixed IP member create: TCP pool, SOURCE_IP_PORT."""
    self._test_mixed_member_create(self._listener_pool_create(
        const.TCP, const.TCP, const.LB_ALGORITHM_SOURCE_IP_PORT))
@decorators.idempotent_id('aa6b282c-d1c2-4a39-b085-33c224d4faff')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
                      'IPv6 testing is disabled')
# NOTE: skipped on the amphora driver until "UDP load balancers cannot
# mix protocol versions" (storyboard story 2003329) is fixed.
@decorators.skip_because(
    bug='2003329',
    bug_type='storyboard',
    condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
def test_mixed_UDP_SIP_member_create(self):
    """Mixed IP member create: UDP pool, SOURCE_IP_PORT."""
    udp_supported = self.mem_listener_client.is_version_supported(
        self.api_version, '2.1')
    if not udp_supported:
        raise self.skipException('UDP listener support is only available '
                                 'in Octavia API version 2.1 or newer')
    self._test_mixed_member_create(self._listener_pool_create(
        const.UDP, const.UDP, const.LB_ALGORITHM_SOURCE_IP_PORT))

View File

@ -12,12 +12,14 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import testtools
from uuid import UUID from uuid import UUID
from dateutil import parser from dateutil import parser
from tempest import config from tempest import config
from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import data_utils
from tempest.lib import decorators from tempest.lib import decorators
from tempest.lib import exceptions
from octavia_tempest_plugin.common import constants as const from octavia_tempest_plugin.common import constants as const
from octavia_tempest_plugin.tests import test_base from octavia_tempest_plugin.tests import test_base
@ -50,40 +52,286 @@ class PoolScenarioTest(test_base.LoadBalancerBaseTest):
const.ACTIVE, const.ACTIVE,
CONF.load_balancer.lb_build_interval, CONF.load_balancer.lb_build_interval,
CONF.load_balancer.lb_build_timeout) CONF.load_balancer.lb_build_timeout)
cls.protocol = const.HTTP
cls.lb_feature_enabled = CONF.loadbalancer_feature_enabled
if not cls.lb_feature_enabled.l7_protocol_enabled:
cls.protocol = cls.lb_feature_enabled.l4_protocol
listener_name = data_utils.rand_name("lb_member_listener1_pool") # Pool with Least Connections algorithm
listener_kwargs = { @decorators.idempotent_id('f30bd185-ca13-45c1-8a2f-f4179e7f0c3a')
const.NAME: listener_name, def test_HTTP_LC_pool_standalone_CRUD(self):
const.PROTOCOL: cls.protocol, self._test_pool_CRUD(listener_protocol=None,
const.PROTOCOL_PORT: '80', pool_protocol=const.HTTP, protocol_port=10,
const.LOADBALANCER_ID: cls.lb_id, algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
}
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
cls.listener_id = listener[const.ID]
cls.addClassResourceCleanup(
cls.mem_listener_client.cleanup_listener,
cls.listener_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, @decorators.idempotent_id('d8c428b0-dee4-4374-8286-31e52aeb7fe5')
cls.lb_id, const.PROVISIONING_STATUS, def test_HTTP_LC_pool_with_listener_CRUD(self):
const.ACTIVE, self._test_pool_CRUD(listener_protocol=const.HTTP,
CONF.load_balancer.build_interval, pool_protocol=const.HTTP, protocol_port=11,
CONF.load_balancer.build_timeout) algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
# Pool CRUD scenarios for LEAST_CONNECTIONS. "standalone" variants attach
# the pool directly to the load balancer (no listener); "with_listener"
# variants front the pool with a listener. Each test uses a distinct
# protocol_port so the scenarios do not collide on one load balancer.
@decorators.idempotent_id('82d8e035-4068-4bad-a87b-e4907bf6d464')
def test_HTTPS_LC_pool_standalone_CRUD(self):
    """Pool CRUD: standalone HTTPS pool, LEAST_CONNECTIONS."""
    self._test_pool_CRUD(
        listener_protocol=None,
        pool_protocol=const.HTTPS,
        protocol_port=12,
        algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
@decorators.idempotent_id('726beb03-de8c-43cd-ba5f-e7d6faf627a3')
def test_HTTPS_LC_pool_with_listener_CRUD(self):
    """Pool CRUD: HTTPS pool behind an HTTPS listener, LEAST_CONNECTIONS."""
    self._test_pool_CRUD(
        listener_protocol=const.HTTPS,
        pool_protocol=const.HTTPS,
        protocol_port=13,
        algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
@decorators.idempotent_id('b3cef24e-343a-4e77-833b-422158d54673')
def test_PROXY_LC_pool_standalone_CRUD(self):
    """Pool CRUD: standalone PROXY pool, LEAST_CONNECTIONS."""
    self._test_pool_CRUD(
        listener_protocol=None,
        pool_protocol=const.PROXY,
        protocol_port=14,
        algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
@decorators.idempotent_id('f1edfb45-a9d3-4150-8bc9-4fc3427c6346')
def test_PROXY_LC_pool_with_listener_CRUD(self):
    """Pool CRUD: PROXY pool behind a TCP listener, LEAST_CONNECTIONS."""
    self._test_pool_CRUD(
        listener_protocol=const.TCP,
        pool_protocol=const.PROXY,
        protocol_port=15,
        algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
@decorators.idempotent_id('d6d067c3-ec63-4b5d-a364-acc7493ae3b8')
def test_TCP_LC_pool_standalone_CRUD(self):
    """Pool CRUD: standalone TCP pool, LEAST_CONNECTIONS."""
    self._test_pool_CRUD(
        listener_protocol=None,
        pool_protocol=const.TCP,
        protocol_port=16,
        algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
@decorators.idempotent_id('a159c345-9463-4c01-b571-086c789bd7d5')
def test_TCP_LC_pool_with_listener_CRUD(self):
    """Pool CRUD: TCP pool behind a TCP listener, LEAST_CONNECTIONS."""
    self._test_pool_CRUD(
        listener_protocol=const.TCP,
        pool_protocol=const.TCP,
        protocol_port=17,
        algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
@decorators.idempotent_id('6fea6a39-19eb-4a0e-b507-82ecc57c1dc5')
def test_UDP_LC_pool_standalone_CRUD(self):
    """Pool CRUD: standalone UDP pool, LEAST_CONNECTIONS."""
    self._test_pool_CRUD(
        listener_protocol=None,
        pool_protocol=const.UDP,
        protocol_port=18,
        algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
@decorators.idempotent_id('6ce12d8c-ad59-4e48-8de1-d26926735457')
def test_UDP_LC_pool_with_listener_CRUD(self):
    """Pool CRUD: UDP pool behind a UDP listener, LEAST_CONNECTIONS."""
    self._test_pool_CRUD(
        listener_protocol=const.UDP,
        pool_protocol=const.UDP,
        protocol_port=19,
        algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
# Pool with Round Robin algorithm
@decorators.idempotent_id('dfa120bf-81b9-4f22-bb5e-7df660c18173') @decorators.idempotent_id('dfa120bf-81b9-4f22-bb5e-7df660c18173')
def test_pool_standalone_CRUD(self): def test_HTTP_RR_pool_standalone_CRUD(self):
self._test_pool_CRUD(has_listener=False) self._test_pool_CRUD(listener_protocol=None,
pool_protocol=const.HTTP, protocol_port=20,
algorithm=const.LB_ALGORITHM_ROUND_ROBIN)
@decorators.idempotent_id('087da8ab-79c7-48ba-871c-5769185cea3e') @decorators.idempotent_id('087da8ab-79c7-48ba-871c-5769185cea3e')
def test_pool_with_listener_CRUD(self): def test_HTTP_RR_pool_with_listener_CRUD(self):
self._test_pool_CRUD(has_listener=True) self._test_pool_CRUD(listener_protocol=const.HTTP,
pool_protocol=const.HTTP, protocol_port=21,
algorithm=const.LB_ALGORITHM_ROUND_ROBIN)
def _test_pool_CRUD(self, has_listener): @decorators.idempotent_id('6179a5d1-6425-4144-a437-b0d260b7b883')
def test_HTTPS_RR_pool_standalone_CRUD(self):
self._test_pool_CRUD(listener_protocol=None,
pool_protocol=const.HTTPS, protocol_port=22,
algorithm=const.LB_ALGORITHM_ROUND_ROBIN)
@decorators.idempotent_id('1b4585b4-c521-48e8-a69a-8a1d729a2949')
def test_HTTPS_RR_pool_with_listener_CRUD(self):
self._test_pool_CRUD(listener_protocol=const.HTTPS,
pool_protocol=const.HTTPS, protocol_port=23,
algorithm=const.LB_ALGORITHM_ROUND_ROBIN)
@decorators.idempotent_id('6b9f4f01-cb78-409a-b9fe-cbbeb27d0c5f')
def test_PROXY_RR_pool_standalone_CRUD(self):
self._test_pool_CRUD(listener_protocol=None,
pool_protocol=const.PROXY, protocol_port=24,
algorithm=const.LB_ALGORITHM_ROUND_ROBIN)
@decorators.idempotent_id('0228ea63-dff5-4dfb-b48a-193e8509caa8')
def test_PROXY_RR_pool_with_listener_CRUD(self):
self._test_pool_CRUD(listener_protocol=const.TCP,
pool_protocol=const.PROXY, protocol_port=25,
algorithm=const.LB_ALGORITHM_ROUND_ROBIN)
@decorators.idempotent_id('71088923-cfdf-4821-a6a8-c7c9045b624d')
def test_TCP_RR_pool_standalone_CRUD(self):
self._test_pool_CRUD(listener_protocol=None,
pool_protocol=const.TCP, protocol_port=26,
algorithm=const.LB_ALGORITHM_ROUND_ROBIN)
@decorators.idempotent_id('4b663772-5c6b-49a3-b592-49d91bd71ff1')
def test_TCP_RR_pool_with_listener_CRUD(self):
self._test_pool_CRUD(listener_protocol=const.TCP,
pool_protocol=const.TCP, protocol_port=27,
algorithm=const.LB_ALGORITHM_ROUND_ROBIN)
@decorators.idempotent_id('45aefaa0-c909-4861-91c6-517ea10285a5')
def test_UDP_RR_pool_standalone_CRUD(self):
self._test_pool_CRUD(listener_protocol=None,
pool_protocol=const.UDP, protocol_port=28,
algorithm=const.LB_ALGORITHM_ROUND_ROBIN)
@decorators.idempotent_id('cff21560-52be-439f-a41f-789d365db567')
def test_UDP_RR_pool_with_listener_CRUD(self):
    """Test CRUD of a UDP pool, ROUND_ROBIN, behind a UDP listener."""
    self._test_pool_CRUD(listener_protocol=const.UDP,
                         pool_protocol=const.UDP, protocol_port=29,
                         algorithm=const.LB_ALGORITHM_ROUND_ROBIN)
# Pool with Source IP algorithm
@decorators.idempotent_id('4ef47185-ef22-4396-8c9c-b98b9b476605')
def test_HTTP_SI_pool_standalone_CRUD(self):
    """Test CRUD of an HTTP pool, SOURCE_IP, with no listener attached."""
    self._test_pool_CRUD(listener_protocol=None,
                         pool_protocol=const.HTTP, protocol_port=30,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP)
@decorators.idempotent_id('13a5caba-42a5-4b8c-a389-74d630a91687')
def test_HTTP_SI_pool_with_listener_CRUD(self):
    """Test CRUD of an HTTP pool, SOURCE_IP, behind an HTTP listener."""
    self._test_pool_CRUD(listener_protocol=const.HTTP,
                         pool_protocol=const.HTTP, protocol_port=31,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP)
@decorators.idempotent_id('5ff7732a-7481-4c03-8efc-5ee794feb11a')
def test_HTTPS_SI_pool_standalone_CRUD(self):
    """Test CRUD of an HTTPS pool, SOURCE_IP, with no listener attached."""
    self._test_pool_CRUD(listener_protocol=None,
                         pool_protocol=const.HTTPS, protocol_port=32,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP)
@decorators.idempotent_id('30f3d93c-cc22-4821-8805-d5c41023eccd')
def test_HTTPS_SI_pool_with_listener_CRUD(self):
    """Test CRUD of an HTTPS pool, SOURCE_IP, behind an HTTPS listener."""
    self._test_pool_CRUD(listener_protocol=const.HTTPS,
                         pool_protocol=const.HTTPS, protocol_port=33,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP)
@decorators.idempotent_id('7cbb01b8-196b-4ac3-9fec-a41abf867850')
def test_PROXY_SI_pool_standalone_CRUD(self):
    """Test CRUD of a PROXY pool, SOURCE_IP, with no listener attached."""
    self._test_pool_CRUD(listener_protocol=None,
                         pool_protocol=const.PROXY, protocol_port=34,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP)
@decorators.idempotent_id('29327103-4949-4a77-a748-87ab725237b7')
def test_PROXY_SI_pool_with_listener_CRUD(self):
    """Test CRUD of a PROXY pool, SOURCE_IP, behind a TCP listener."""
    self._test_pool_CRUD(listener_protocol=const.TCP,
                         pool_protocol=const.PROXY, protocol_port=35,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP)
@decorators.idempotent_id('6a4dd425-d7d9-40dd-b451-feb4b3c551cc')
def test_TCP_SI_pool_standalone_CRUD(self):
    """Test CRUD of a TCP pool, SOURCE_IP, with no listener attached."""
    self._test_pool_CRUD(listener_protocol=None,
                         pool_protocol=const.TCP, protocol_port=36,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP)
@decorators.idempotent_id('4391d6a5-bb1c-4ff0-9f74-7b8c43a0b150')
def test_TCP_SI_pool_with_listener_CRUD(self):
    """Test CRUD of a TCP pool, SOURCE_IP, behind a TCP listener."""
    self._test_pool_CRUD(listener_protocol=const.TCP,
                         pool_protocol=const.TCP, protocol_port=37,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP)
@decorators.idempotent_id('211a688c-f495-4f32-a297-c64d240b5de0')
def test_UDP_SI_pool_standalone_CRUD(self):
    """Test CRUD of a UDP pool, SOURCE_IP, with no listener attached."""
    self._test_pool_CRUD(listener_protocol=None,
                         pool_protocol=const.UDP, protocol_port=38,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP)
@decorators.idempotent_id('b19f1285-dbf2-4ac9-9199-3c3693148133')
def test_UDP_SI_pool_with_listener_CRUD(self):
    """Test CRUD of a UDP pool, SOURCE_IP, behind a UDP listener."""
    self._test_pool_CRUD(listener_protocol=const.UDP,
                         pool_protocol=const.UDP, protocol_port=39,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP)
# Pool with Source IP Port algorithm
@decorators.idempotent_id('fee61d34-e272-42f5-92e2-69b515c6cded')
def test_HTTP_SIP_pool_standalone_CRUD(self):
    """Test CRUD of an HTTP pool, SOURCE_IP_PORT, with no listener."""
    self._test_pool_CRUD(listener_protocol=None,
                         pool_protocol=const.HTTP, protocol_port=40,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
@decorators.idempotent_id('d99948da-649d-493c-a74d-72e532df0605')
def test_HTTP_SIP_pool_with_listener_CRUD(self):
    """Test CRUD of an HTTP pool, SOURCE_IP_PORT, behind an HTTP listener."""
    self._test_pool_CRUD(listener_protocol=const.HTTP,
                         pool_protocol=const.HTTP, protocol_port=41,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
@decorators.idempotent_id('b3c68f89-634e-4279-9546-9f2d2eac4bfa')
def test_HTTPS_SIP_pool_standalone_CRUD(self):
    """Test CRUD of an HTTPS pool, SOURCE_IP_PORT, with no listener."""
    self._test_pool_CRUD(listener_protocol=None,
                         pool_protocol=const.HTTPS, protocol_port=42,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
@decorators.idempotent_id('4327f636-50c3-411c-b90e-0b907bdaffc5')
def test_HTTPS_SIP_pool_with_listener_CRUD(self):
    """Test CRUD of an HTTPS pool, SOURCE_IP_PORT, behind an HTTPS listener."""
    self._test_pool_CRUD(listener_protocol=const.HTTPS,
                         pool_protocol=const.HTTPS, protocol_port=43,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
@decorators.idempotent_id('95a93e91-6ac0-40d5-999c-84a8b68c14f4')
def test_PROXY_SIP_pool_standalone_CRUD(self):
    """Test CRUD of a PROXY pool, SOURCE_IP_PORT, with no listener."""
    self._test_pool_CRUD(listener_protocol=None,
                         pool_protocol=const.PROXY, protocol_port=44,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
@decorators.idempotent_id('13893ac9-150f-4605-be68-6bdf65e2bb12')
def test_PROXY_SIP_pool_with_listener_CRUD(self):
    """Test CRUD of a PROXY pool, SOURCE_IP_PORT, behind a TCP listener."""
    self._test_pool_CRUD(listener_protocol=const.TCP,
                         pool_protocol=const.PROXY, protocol_port=45,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
@decorators.idempotent_id('d045ea39-b6dd-4171-bb90-2b9970e25303')
def test_TCP_SIP_pool_standalone_CRUD(self):
    """Test CRUD of a TCP pool, SOURCE_IP_PORT, with no listener."""
    self._test_pool_CRUD(listener_protocol=None,
                         pool_protocol=const.TCP, protocol_port=46,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
@decorators.idempotent_id('ec22ab54-8e0a-4472-8f70-78c34f28dc36')
def test_TCP_SIP_pool_with_listener_CRUD(self):
    """Test CRUD of a TCP pool, SOURCE_IP_PORT, behind a TCP listener."""
    self._test_pool_CRUD(listener_protocol=const.TCP,
                         pool_protocol=const.TCP, protocol_port=47,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
@decorators.idempotent_id('0e0f0299-8c5e-4d7c-a99e-85db43b45446')
def test_UDP_SIP_pool_standalone_CRUD(self):
    """Test CRUD of a UDP pool, SOURCE_IP_PORT, with no listener."""
    self._test_pool_CRUD(listener_protocol=None,
                         pool_protocol=const.UDP, protocol_port=48,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
@decorators.idempotent_id('66d50010-13ca-4588-ae36-61bb783d556e')
def test_UDP_SIP_pool_with_listener_CRUD(self):
    """Test CRUD of a UDP pool, SOURCE_IP_PORT, behind a UDP listener."""
    self._test_pool_CRUD(listener_protocol=const.UDP,
                         pool_protocol=const.UDP, protocol_port=49,
                         algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
# Test with session persistence
@decorators.idempotent_id('d6b8119b-40e9-487d-a037-9972a1e688e8')
def test_HTTP_RR_app_cookie_pool_with_listener_CRUD(self):
    """Test CRUD of an HTTP ROUND_ROBIN pool with APP_COOKIE persistence."""
    self._test_pool_CRUD(
        listener_protocol=const.HTTP,
        pool_protocol=const.HTTP, protocol_port=50,
        algorithm=const.LB_ALGORITHM_ROUND_ROBIN,
        session_persistence=const.SESSION_PERSISTENCE_APP_COOKIE)
@decorators.idempotent_id('a67f2276-6469-48d4-bf7e-ddf6d8694dba')
def test_HTTP_RR_http_cookie_pool_with_listener_CRUD(self):
    """Test CRUD of an HTTP ROUND_ROBIN pool with HTTP_COOKIE persistence."""
    self._test_pool_CRUD(
        listener_protocol=const.HTTP,
        pool_protocol=const.HTTP, protocol_port=51,
        algorithm=const.LB_ALGORITHM_ROUND_ROBIN,
        session_persistence=const.SESSION_PERSISTENCE_HTTP_COOKIE)
@decorators.idempotent_id('c248e3d8-43d9-4fd4-93af-845747c9b939')
def test_HTTP_RR_source_IP_pool_with_listener_CRUD(self):
    """Test CRUD of an HTTP ROUND_ROBIN pool with SOURCE_IP persistence."""
    self._test_pool_CRUD(
        listener_protocol=const.HTTP,
        pool_protocol=const.HTTP, protocol_port=52,
        algorithm=const.LB_ALGORITHM_ROUND_ROBIN,
        session_persistence=const.SESSION_PERSISTENCE_SOURCE_IP)
@decorators.idempotent_id('dc7f0ed5-f94c-4498-9dca-5dbc08e7162f')
def test_UDP_RR_source_ip_pool_with_listener_CRUD(self):
    """Test CRUD of a UDP ROUND_ROBIN pool with SOURCE_IP persistence."""
    self._test_pool_CRUD(
        listener_protocol=const.UDP,
        pool_protocol=const.UDP, protocol_port=53,
        algorithm=const.LB_ALGORITHM_ROUND_ROBIN,
        session_persistence=const.SESSION_PERSISTENCE_SOURCE_IP)
def _test_pool_CRUD(self, listener_protocol, pool_protocol, protocol_port,
algorithm, session_persistence=None):
"""Tests pool create, read, update, delete """Tests pool create, read, update, delete
* Create a fully populated pool. * Create a fully populated pool.
@ -91,6 +339,35 @@ class PoolScenarioTest(test_base.LoadBalancerBaseTest):
* Update the pool. * Update the pool.
* Delete the pool. * Delete the pool.
""" """
if (algorithm == const.LB_ALGORITHM_SOURCE_IP_PORT and not
self.mem_listener_client.is_version_supported(
self.api_version, '2.13')):
raise testtools.TestCase.skipException(
'Skipping this test as load balancing algorithm '
'SOURCE_IP_PORT requires API version 2.13 or newer.')
# Listener create
if listener_protocol is not None:
listener_name = data_utils.rand_name("lb_member_listener1_pool")
listener_kwargs = {
const.NAME: listener_name,
const.PROTOCOL: listener_protocol,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
}
listener = self.mem_listener_client.create_listener(
**listener_kwargs)
listener_id = listener[const.ID]
self.addClassResourceCleanup(
self.mem_listener_client.cleanup_listener, listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
# Pool create # Pool create
pool_name = data_utils.rand_name("lb_member_pool1-CRUD") pool_name = data_utils.rand_name("lb_member_pool1-CRUD")
pool_description = data_utils.arbitrary_string(size=255) pool_description = data_utils.arbitrary_string(size=255)
@ -99,20 +376,44 @@ class PoolScenarioTest(test_base.LoadBalancerBaseTest):
const.NAME: pool_name, const.NAME: pool_name,
const.DESCRIPTION: pool_description, const.DESCRIPTION: pool_description,
const.ADMIN_STATE_UP: False, const.ADMIN_STATE_UP: False,
const.PROTOCOL: self.protocol, const.PROTOCOL: pool_protocol,
const.LB_ALGORITHM: self.lb_algorithm, const.LB_ALGORITHM: algorithm,
} }
if self.lb_feature_enabled.session_persistence_enabled:
if session_persistence == const.SESSION_PERSISTENCE_APP_COOKIE:
pool_kwargs[const.SESSION_PERSISTENCE] = { pool_kwargs[const.SESSION_PERSISTENCE] = {
const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE, const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
const.COOKIE_NAME: pool_sp_cookie_name, const.COOKIE_NAME: pool_sp_cookie_name
} }
if has_listener: elif session_persistence == const.SESSION_PERSISTENCE_HTTP_COOKIE:
pool_kwargs[const.LISTENER_ID] = self.listener_id pool_kwargs[const.SESSION_PERSISTENCE] = {
const.TYPE: const.SESSION_PERSISTENCE_HTTP_COOKIE
}
elif session_persistence == const.SESSION_PERSISTENCE_SOURCE_IP:
pool_kwargs[const.SESSION_PERSISTENCE] = {
const.TYPE: const.SESSION_PERSISTENCE_SOURCE_IP
}
if listener_protocol is not None:
pool_kwargs[const.LISTENER_ID] = listener_id
else: else:
pool_kwargs[const.LOADBALANCER_ID] = self.lb_id pool_kwargs[const.LOADBALANCER_ID] = self.lb_id
pool = self.mem_pool_client.create_pool(**pool_kwargs) # This is a special case as the reference driver does not support
# SOURCE-IP-PORT. Since it runs with not_implemented_is_error, we must
# handle this test case special.
try:
pool = self.mem_pool_client.create_pool(**pool_kwargs)
except exceptions.NotImplemented as e:
if algorithm != const.LB_ALGORITHM_SOURCE_IP_PORT:
raise
message = ("The configured provider driver '{driver}' "
"does not support a feature required for this "
"test.".format(driver=CONF.load_balancer.provider))
if hasattr(e, 'resp_body'):
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
self.addCleanup( self.addCleanup(
self.mem_pool_client.cleanup_pool, self.mem_pool_client.cleanup_pool,
pool[const.ID], pool[const.ID],
@ -137,24 +438,31 @@ class PoolScenarioTest(test_base.LoadBalancerBaseTest):
parser.parse(pool[const.UPDATED_AT]) parser.parse(pool[const.UPDATED_AT])
UUID(pool[const.ID]) UUID(pool[const.ID])
self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS]) self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
self.assertEqual(self.protocol, pool[const.PROTOCOL]) self.assertEqual(pool_protocol, pool[const.PROTOCOL])
self.assertEqual(1, len(pool[const.LOADBALANCERS])) self.assertEqual(1, len(pool[const.LOADBALANCERS]))
self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID]) self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
if has_listener: if listener_protocol is not None:
self.assertEqual(1, len(pool[const.LISTENERS])) self.assertEqual(1, len(pool[const.LISTENERS]))
self.assertEqual(self.listener_id, self.assertEqual(listener_id, pool[const.LISTENERS][0][const.ID])
pool[const.LISTENERS][0][const.ID])
else: else:
self.assertEmpty(pool[const.LISTENERS]) self.assertEmpty(pool[const.LISTENERS])
self.assertEqual(self.lb_algorithm, self.assertEqual(algorithm, pool[const.LB_ALGORITHM])
pool[const.LB_ALGORITHM])
if self.lb_feature_enabled.session_persistence_enabled: if session_persistence == const.SESSION_PERSISTENCE_APP_COOKIE:
self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE)) self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE, self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
pool[const.SESSION_PERSISTENCE][const.TYPE]) pool[const.SESSION_PERSISTENCE][const.TYPE])
self.assertEqual(pool_sp_cookie_name, self.assertEqual(pool_sp_cookie_name,
pool[const.SESSION_PERSISTENCE][ pool[const.SESSION_PERSISTENCE][
const.COOKIE_NAME]) const.COOKIE_NAME])
elif session_persistence == const.SESSION_PERSISTENCE_HTTP_COOKIE:
self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
self.assertEqual(const.SESSION_PERSISTENCE_HTTP_COOKIE,
pool[const.SESSION_PERSISTENCE][const.TYPE])
elif session_persistence == const.SESSION_PERSISTENCE_SOURCE_IP:
self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
self.assertEqual(const.SESSION_PERSISTENCE_SOURCE_IP,
pool[const.SESSION_PERSISTENCE][const.TYPE])
# Pool update # Pool update
new_name = data_utils.rand_name("lb_member_pool1-update") new_name = data_utils.rand_name("lb_member_pool1-update")
@ -166,14 +474,26 @@ class PoolScenarioTest(test_base.LoadBalancerBaseTest):
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
} }
if self.lb_feature_enabled.pool_algorithms_enabled: # We have to set it to the same protocol as not all
pool_update_kwargs[const.LB_ALGORITHM] = ( # drivers support more than one pool algorithm
const.LB_ALGORITHM_LEAST_CONNECTIONS) pool_update_kwargs[const.LB_ALGORITHM] = algorithm
if self.protocol == const.HTTP and ( if session_persistence == const.SESSION_PERSISTENCE_APP_COOKIE:
self.lb_feature_enabled.session_persistence_enabled):
pool_update_kwargs[const.SESSION_PERSISTENCE] = { pool_update_kwargs[const.SESSION_PERSISTENCE] = {
const.TYPE: const.SESSION_PERSISTENCE_HTTP_COOKIE} const.TYPE: const.SESSION_PERSISTENCE_HTTP_COOKIE
}
elif session_persistence == const.SESSION_PERSISTENCE_HTTP_COOKIE:
pool_update_kwargs[const.SESSION_PERSISTENCE] = {
const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
const.COOKIE_NAME: pool_sp_cookie_name
}
elif session_persistence == const.SESSION_PERSISTENCE_SOURCE_IP:
# Some protocols only support source IP session persistence
# so set this to the same.
pool_update_kwargs[const.SESSION_PERSISTENCE] = {
const.TYPE: const.SESSION_PERSISTENCE_SOURCE_IP
}
pool = self.mem_pool_client.update_pool( pool = self.mem_pool_client.update_pool(
pool[const.ID], **pool_update_kwargs) pool[const.ID], **pool_update_kwargs)
@ -192,15 +512,27 @@ class PoolScenarioTest(test_base.LoadBalancerBaseTest):
self.assertEqual(new_name, pool[const.NAME]) self.assertEqual(new_name, pool[const.NAME])
self.assertEqual(new_description, pool[const.DESCRIPTION]) self.assertEqual(new_description, pool[const.DESCRIPTION])
self.assertTrue(pool[const.ADMIN_STATE_UP]) self.assertTrue(pool[const.ADMIN_STATE_UP])
if self.lb_feature_enabled.pool_algorithms_enabled: self.assertEqual(algorithm, pool[const.LB_ALGORITHM])
self.assertEqual(const.LB_ALGORITHM_LEAST_CONNECTIONS,
pool[const.LB_ALGORITHM]) if session_persistence == const.SESSION_PERSISTENCE_APP_COOKIE:
if self.lb_feature_enabled.session_persistence_enabled:
self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE)) self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
self.assertEqual(const.SESSION_PERSISTENCE_HTTP_COOKIE, self.assertEqual(const.SESSION_PERSISTENCE_HTTP_COOKIE,
pool[const.SESSION_PERSISTENCE][const.TYPE]) pool[const.SESSION_PERSISTENCE][const.TYPE])
self.assertIsNone( self.assertIsNone(
pool[const.SESSION_PERSISTENCE].get(const.COOKIE_NAME)) pool[const.SESSION_PERSISTENCE].get(const.COOKIE_NAME))
elif session_persistence == const.SESSION_PERSISTENCE_HTTP_COOKIE:
self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
pool[const.SESSION_PERSISTENCE][const.TYPE])
self.assertEqual(pool_sp_cookie_name,
pool[const.SESSION_PERSISTENCE][
const.COOKIE_NAME])
elif session_persistence == const.SESSION_PERSISTENCE_SOURCE_IP:
self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
self.assertEqual(const.SESSION_PERSISTENCE_SOURCE_IP,
pool[const.SESSION_PERSISTENCE][const.TYPE])
self.assertIsNone(
pool[const.SESSION_PERSISTENCE].get(const.COOKIE_NAME))
# Pool delete # Pool delete
waiters.wait_for_status( waiters.wait_for_status(
@ -209,6 +541,7 @@ class PoolScenarioTest(test_base.LoadBalancerBaseTest):
const.ACTIVE, const.ACTIVE,
CONF.load_balancer.check_interval, CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout) CONF.load_balancer.check_timeout)
self.mem_pool_client.delete_pool(pool[const.ID]) self.mem_pool_client.delete_pool(pool[const.ID])
waiters.wait_for_deleted_status_or_not_found( waiters.wait_for_deleted_status_or_not_found(

View File

@ -14,7 +14,6 @@
import datetime import datetime
import ipaddress import ipaddress
import requests
import shlex import shlex
import testtools import testtools
import time import time
@ -24,21 +23,30 @@ from oslo_utils import uuidutils
from tempest import config from tempest import config
from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import data_utils
from tempest.lib import decorators from tempest.lib import decorators
from tempest.lib import exceptions
from octavia_tempest_plugin.common import constants as const from octavia_tempest_plugin.common import constants as const
from octavia_tempest_plugin.tests import test_base from octavia_tempest_plugin.tests import test_base
from octavia_tempest_plugin.tests import validators
from octavia_tempest_plugin.tests import waiters from octavia_tempest_plugin.tests import waiters
CONF = config.CONF CONF = config.CONF
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@testtools.skipUnless(
CONF.validation.run_validation,
'Traffic tests will not work without run_validation enabled.')
class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute): class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
@classmethod
def skip_checks(cls):
super().skip_checks()
if not CONF.validation.run_validation:
raise cls.skipException('Traffic tests will not work without '
'run_validation enabled.')
if CONF.load_balancer.test_with_noop:
raise cls.skipException('Traffic tests will not work in noop '
'mode.')
@classmethod @classmethod
def resource_setup(cls): def resource_setup(cls):
"""Setup resources needed by the tests.""" """Setup resources needed by the tests."""
@ -80,27 +88,19 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
else: else:
cls.lb_vip_address = lb[const.VIP_ADDRESS] cls.lb_vip_address = lb[const.VIP_ADDRESS]
# Per protocol listeners and pools IDs
cls.listener_ids = {}
cls.pool_ids = {}
cls.protocol = const.HTTP
lb_feature_enabled = CONF.loadbalancer_feature_enabled
if not lb_feature_enabled.l7_protocol_enabled:
cls.protocol = lb_feature_enabled.l4_protocol
# Don't use same ports for HTTP/l4_protocol and UDP because some
# releases (<=train) don't support it
cls._listener_pool_create(cls.protocol, 80)
cls._listener_pool_create(const.UDP, 8080)
@classmethod @classmethod
def _listener_pool_create(cls, protocol, protocol_port): def _listener_pool_create(cls, protocol, protocol_port,
pool_algorithm=const.LB_ALGORITHM_ROUND_ROBIN):
if (protocol == const.UDP and if (protocol == const.UDP and
not cls.mem_listener_client.is_version_supported( not cls.mem_listener_client.is_version_supported(
cls.api_version, '2.1')): cls.api_version, '2.1')):
return return
if (pool_algorithm == const.LB_ALGORITHM_SOURCE_IP_PORT and not
cls.mem_listener_client.is_version_supported(
cls.api_version, '2.13')):
raise testtools.TestCase.skipException(
'Skipping this test as load balancing algorithm '
'SOURCE_IP_PORT requires API version 2.13 or newer.')
listener_name = data_utils.rand_name("lb_member_listener1_operations") listener_name = data_utils.rand_name("lb_member_listener1_operations")
listener_kwargs = { listener_kwargs = {
@ -112,12 +112,10 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
# haproxy process and use haproxy>=1.8: # haproxy process and use haproxy>=1.8:
const.CONNECTION_LIMIT: 200, const.CONNECTION_LIMIT: 200,
} }
listener = cls.mem_listener_client.create_listener( listener = cls.mem_listener_client.create_listener(**listener_kwargs)
**listener_kwargs)
cls.listener_ids[protocol] = listener[const.ID]
cls.addClassResourceCleanup( cls.addClassResourceCleanup(
cls.mem_listener_client.cleanup_listener, cls.mem_listener_client.cleanup_listener,
cls.listener_ids[protocol], listener[const.ID],
lb_client=cls.mem_lb_client, lb_id=cls.lb_id) lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
@ -130,14 +128,13 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
pool_kwargs = { pool_kwargs = {
const.NAME: pool_name, const.NAME: pool_name,
const.PROTOCOL: protocol, const.PROTOCOL: protocol,
const.LB_ALGORITHM: cls.lb_algorithm, const.LB_ALGORITHM: pool_algorithm,
const.LISTENER_ID: cls.listener_ids[protocol], const.LISTENER_ID: listener[const.ID],
} }
pool = cls.mem_pool_client.create_pool(**pool_kwargs) pool = cls.mem_pool_client.create_pool(**pool_kwargs)
cls.pool_ids[protocol] = pool[const.ID]
cls.addClassResourceCleanup( cls.addClassResourceCleanup(
cls.mem_pool_client.cleanup_pool, cls.mem_pool_client.cleanup_pool,
cls.pool_ids[protocol], pool[const.ID],
lb_client=cls.mem_lb_client, lb_id=cls.lb_id) lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
@ -146,7 +143,12 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout) CONF.load_balancer.build_timeout)
def _test_basic_traffic(self, protocol, protocol_port): return listener[const.ID], pool[const.ID]
def _test_basic_traffic(
self, protocol, protocol_port, listener_id, pool_id,
persistent=True, traffic_member_count=2, source_port=None,
delay=None):
"""Tests sending traffic through a loadbalancer """Tests sending traffic through a loadbalancer
* Set up members on a loadbalancer. * Set up members on a loadbalancer.
@ -155,7 +157,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
# Set up Member 1 for Webserver 1 # Set up Member 1 for Webserver 1
member1_name = data_utils.rand_name("lb_member_member1-traffic") member1_name = data_utils.rand_name("lb_member_member1-traffic")
member1_kwargs = { member1_kwargs = {
const.POOL_ID: self.pool_ids[protocol], const.POOL_ID: pool_id,
const.NAME: member1_name, const.NAME: member1_name,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
const.ADDRESS: self.webserver1_ip, const.ADDRESS: self.webserver1_ip,
@ -164,11 +166,10 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
if self.lb_member_1_subnet: if self.lb_member_1_subnet:
member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID] member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
member1 = self.mem_member_client.create_member( member1 = self.mem_member_client.create_member(**member1_kwargs)
**member1_kwargs)
self.addCleanup( self.addCleanup(
self.mem_member_client.cleanup_member, self.mem_member_client.cleanup_member,
member1[const.ID], pool_id=self.pool_ids[protocol], member1[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id) lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id, self.mem_lb_client.show_loadbalancer, self.lb_id,
@ -179,7 +180,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
# Set up Member 2 for Webserver 2 # Set up Member 2 for Webserver 2
member2_name = data_utils.rand_name("lb_member_member2-traffic") member2_name = data_utils.rand_name("lb_member_member2-traffic")
member2_kwargs = { member2_kwargs = {
const.POOL_ID: self.pool_ids[protocol], const.POOL_ID: pool_id,
const.NAME: member2_name, const.NAME: member2_name,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
const.ADDRESS: self.webserver2_ip, const.ADDRESS: self.webserver2_ip,
@ -188,11 +189,10 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
if self.lb_member_2_subnet: if self.lb_member_2_subnet:
member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID] member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
member2 = self.mem_member_client.create_member( member2 = self.mem_member_client.create_member(**member2_kwargs)
**member2_kwargs)
self.addCleanup( self.addCleanup(
self.mem_member_client.cleanup_member, self.mem_member_client.cleanup_member,
member2[const.ID], pool_id=self.pool_ids[protocol], member2[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id) lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id, self.mem_lb_client.show_loadbalancer, self.lb_id,
@ -201,16 +201,27 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
CONF.load_balancer.check_timeout) CONF.load_balancer.check_timeout)
# Send some traffic # Send some traffic
self.check_members_balanced(self.lb_vip_address, self.check_members_balanced(
protocol_port=protocol_port, self.lb_vip_address, protocol_port=protocol_port,
protocol=protocol) persistent=persistent, protocol=protocol,
traffic_member_count=traffic_member_count, source_port=source_port,
delay=delay)
@decorators.attr(type=['smoke', 'slow']) @decorators.attr(type=['smoke', 'slow'])
@testtools.skipIf(CONF.load_balancer.test_with_noop, @testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.') 'Traffic tests will not work in noop mode.')
@decorators.idempotent_id('6751135d-e15a-4e22-89f4-bfcc3408d424') @decorators.idempotent_id('6751135d-e15a-4e22-89f4-bfcc3408d424')
def test_basic_traffic(self): def test_basic_http_traffic(self):
self._test_basic_traffic(self.protocol, 80) listener_id, pool_id = self._listener_pool_create(const.HTTP, 80)
self._test_basic_traffic(const.HTTP, 80, listener_id, pool_id)
@testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.')
@decorators.idempotent_id('332a08e0-eff1-4c19-b46c-bf87148a6d84')
def test_basic_tcp_traffic(self):
listener_id, pool_id = self._listener_pool_create(const.TCP, 81)
self._test_basic_traffic(const.TCP, 81, listener_id, pool_id,
persistent=False)
@testtools.skipIf(CONF.load_balancer.test_with_noop, @testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.') 'Traffic tests will not work in noop mode.')
@ -220,10 +231,11 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
self.api_version, '2.1'): self.api_version, '2.1'):
raise self.skipException('UDP listener support is only available ' raise self.skipException('UDP listener support is only available '
'in Octavia API version 2.1 or newer') 'in Octavia API version 2.1 or newer')
listener_id, pool_id = self._listener_pool_create(const.UDP, 8080)
self._test_basic_traffic(const.UDP, 8080, listener_id, pool_id)
self._test_basic_traffic(const.UDP, 8080) def _test_healthmonitor_traffic(self, protocol, protocol_port,
listener_id, pool_id, persistent=True):
def _test_healthmonitor_traffic(self, protocol, protocol_port):
"""Tests traffic is correctly routed based on healthmonitor status """Tests traffic is correctly routed based on healthmonitor status
* Create three members: * Create three members:
@ -242,7 +254,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
member1_name = data_utils.rand_name("lb_member_member1-hm-traffic") member1_name = data_utils.rand_name("lb_member_member1-hm-traffic")
member1_kwargs = { member1_kwargs = {
const.POOL_ID: self.pool_ids[protocol], const.POOL_ID: pool_id,
const.NAME: member1_name, const.NAME: member1_name,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
const.ADDRESS: self.webserver1_ip, const.ADDRESS: self.webserver1_ip,
@ -251,12 +263,11 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
if self.lb_member_1_subnet: if self.lb_member_1_subnet:
member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID] member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
member1 = self.mem_member_client.create_member( member1 = self.mem_member_client.create_member(**member1_kwargs)
**member1_kwargs)
member1_id = member1[const.ID] member1_id = member1[const.ID]
self.addCleanup( self.addCleanup(
self.mem_member_client.cleanup_member, self.mem_member_client.cleanup_member,
member1_id, pool_id=self.pool_ids[protocol], member1_id, pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id) lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id, self.mem_lb_client.show_loadbalancer, self.lb_id,
@ -267,7 +278,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
# Set up Member 2 for Webserver 2 # Set up Member 2 for Webserver 2
member2_name = data_utils.rand_name("lb_member_member2-hm-traffic") member2_name = data_utils.rand_name("lb_member_member2-hm-traffic")
member2_kwargs = { member2_kwargs = {
const.POOL_ID: self.pool_ids[protocol], const.POOL_ID: pool_id,
const.NAME: member2_name, const.NAME: member2_name,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
const.ADDRESS: self.webserver2_ip, const.ADDRESS: self.webserver2_ip,
@ -277,12 +288,11 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
if self.lb_member_2_subnet: if self.lb_member_2_subnet:
member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID] member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
member2 = self.mem_member_client.create_member( member2 = self.mem_member_client.create_member(**member2_kwargs)
**member2_kwargs)
member2_id = member2[const.ID] member2_id = member2[const.ID]
self.addCleanup( self.addCleanup(
self.mem_member_client.cleanup_member, self.mem_member_client.cleanup_member,
member2_id, pool_id=self.pool_ids[protocol], member2_id, pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id) lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id, self.mem_lb_client.show_loadbalancer, self.lb_id,
@ -293,19 +303,18 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
# Set up Member 3 as a non-existent disabled node # Set up Member 3 as a non-existent disabled node
member3_name = data_utils.rand_name("lb_member_member3-hm-traffic") member3_name = data_utils.rand_name("lb_member_member3-hm-traffic")
member3_kwargs = { member3_kwargs = {
const.POOL_ID: self.pool_ids[protocol], const.POOL_ID: pool_id,
const.NAME: member3_name, const.NAME: member3_name,
const.ADMIN_STATE_UP: False, const.ADMIN_STATE_UP: False,
const.ADDRESS: '192.0.2.1', const.ADDRESS: '192.0.2.1',
const.PROTOCOL_PORT: 80, const.PROTOCOL_PORT: 80,
} }
member3 = self.mem_member_client.create_member( member3 = self.mem_member_client.create_member(**member3_kwargs)
**member3_kwargs)
member3_id = member3[const.ID] member3_id = member3[const.ID]
self.addCleanup( self.addCleanup(
self.mem_member_client.cleanup_member, self.mem_member_client.cleanup_member,
member3_id, pool_id=self.pool_ids[protocol], member3_id, pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id) lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id, self.mem_lb_client.show_loadbalancer, self.lb_id,
@ -320,27 +329,26 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
const.NO_MONITOR, const.NO_MONITOR,
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout, CONF.load_balancer.build_timeout,
pool_id=self.pool_ids[protocol]) pool_id=pool_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_member_client.show_member, self.mem_member_client.show_member,
member2_id, const.OPERATING_STATUS, member2_id, const.OPERATING_STATUS,
const.NO_MONITOR, const.NO_MONITOR,
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout, CONF.load_balancer.build_timeout,
pool_id=self.pool_ids[protocol]) pool_id=pool_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_member_client.show_member, self.mem_member_client.show_member,
member3_id, const.OPERATING_STATUS, member3_id, const.OPERATING_STATUS,
const.OFFLINE, const.OFFLINE,
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout, CONF.load_balancer.build_timeout,
pool_id=self.pool_ids[protocol]) pool_id=pool_id)
# Send some traffic and verify it is balanced # Send some traffic and verify it is balanced
self.check_members_balanced(self.lb_vip_address, self.check_members_balanced(self.lb_vip_address,
protocol_port=protocol_port, protocol_port=protocol_port,
protocol=protocol, protocol=protocol, persistent=persistent)
traffic_member_count=2)
# Create the healthmonitor # Create the healthmonitor
hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic") hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic")
@ -351,7 +359,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
hm_type = const.HEALTH_MONITOR_TCP hm_type = const.HEALTH_MONITOR_TCP
hm_kwargs = { hm_kwargs = {
const.POOL_ID: self.pool_ids[protocol], const.POOL_ID: pool_id,
const.NAME: hm_name, const.NAME: hm_name,
const.TYPE: hm_type, const.TYPE: hm_type,
const.DELAY: 3, const.DELAY: 3,
@ -362,7 +370,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
} }
else: else:
hm_kwargs = { hm_kwargs = {
const.POOL_ID: self.pool_ids[protocol], const.POOL_ID: pool_id,
const.NAME: hm_name, const.NAME: hm_name,
const.TYPE: const.HEALTH_MONITOR_HTTP, const.TYPE: const.HEALTH_MONITOR_HTTP,
const.DELAY: 2, const.DELAY: 2,
@ -400,27 +408,28 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout, CONF.load_balancer.build_timeout,
error_ok=True, error_ok=True,
pool_id=self.pool_ids[protocol]) pool_id=pool_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_member_client.show_member, self.mem_member_client.show_member,
member2_id, const.OPERATING_STATUS, member2_id, const.OPERATING_STATUS,
const.ERROR, const.ERROR,
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout, CONF.load_balancer.build_timeout,
pool_id=self.pool_ids[protocol]) pool_id=pool_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_member_client.show_member, self.mem_member_client.show_member,
member3_id, const.OPERATING_STATUS, member3_id, const.OPERATING_STATUS,
const.OFFLINE, const.OFFLINE,
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout, CONF.load_balancer.build_timeout,
pool_id=self.pool_ids[protocol]) pool_id=pool_id)
# Send some traffic and verify it is *unbalanced*, as expected # Send some traffic and verify it is *unbalanced*, as expected
self.check_members_balanced(self.lb_vip_address, self.check_members_balanced(self.lb_vip_address,
protocol_port=protocol_port, protocol_port=protocol_port,
protocol=protocol, protocol=protocol,
traffic_member_count=1) traffic_member_count=1,
persistent=persistent)
# Delete the healthmonitor # Delete the healthmonitor
self.mem_healthmonitor_client.delete_healthmonitor(hm[const.ID]) self.mem_healthmonitor_client.delete_healthmonitor(hm[const.ID])
@ -438,37 +447,38 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
const.NO_MONITOR, const.NO_MONITOR,
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout, CONF.load_balancer.build_timeout,
pool_id=self.pool_ids[protocol]) pool_id=pool_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_member_client.show_member, self.mem_member_client.show_member,
member2_id, const.OPERATING_STATUS, member2_id, const.OPERATING_STATUS,
const.NO_MONITOR, const.NO_MONITOR,
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout, CONF.load_balancer.build_timeout,
pool_id=self.pool_ids[protocol]) pool_id=pool_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_member_client.show_member, self.mem_member_client.show_member,
member3_id, const.OPERATING_STATUS, member3_id, const.OPERATING_STATUS,
const.OFFLINE, const.OFFLINE,
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout, CONF.load_balancer.build_timeout,
pool_id=self.pool_ids[protocol]) pool_id=pool_id)
# Send some traffic and verify it is balanced again # Send some traffic and verify it is balanced again
self.check_members_balanced(self.lb_vip_address, self.check_members_balanced(self.lb_vip_address,
protocol_port=protocol_port, protocol_port=protocol_port,
protocol=protocol) protocol=protocol, persistent=persistent)
@testtools.skipUnless(
CONF.loadbalancer_feature_enabled.health_monitor_enabled,
'Health monitor testing is disabled')
@decorators.idempotent_id('a16f8eb4-a77c-4b0e-8b1b-91c237039713') @decorators.idempotent_id('a16f8eb4-a77c-4b0e-8b1b-91c237039713')
def test_healthmonitor_traffic(self): def test_healthmonitor_http_traffic(self):
self._test_healthmonitor_traffic(self.protocol, 80) listener_id, pool_id = self._listener_pool_create(const.HTTP, 82)
self._test_healthmonitor_traffic(const.HTTP, 82, listener_id, pool_id)
@decorators.idempotent_id('22f00c34-343b-4aa9-90be-4567ecf85772')
def test_healthmonitor_tcp_traffic(self):
listener_id, pool_id = self._listener_pool_create(const.TCP, 83)
self._test_healthmonitor_traffic(const.TCP, 83, listener_id, pool_id,
persistent=False)
@testtools.skipUnless(
CONF.loadbalancer_feature_enabled.health_monitor_enabled,
'Health monitor testing is disabled')
@decorators.idempotent_id('80b86513-1a76-4e42-91c9-cb23c879e536') @decorators.idempotent_id('80b86513-1a76-4e42-91c9-cb23c879e536')
def test_healthmonitor_udp_traffic(self): def test_healthmonitor_udp_traffic(self):
if not self.mem_listener_client.is_version_supported( if not self.mem_listener_client.is_version_supported(
@ -476,13 +486,11 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
raise self.skipException('UDP listener support is only available ' raise self.skipException('UDP listener support is only available '
'in Octavia API version 2.1 or newer') 'in Octavia API version 2.1 or newer')
self._test_healthmonitor_traffic(const.UDP, 8080) listener_id, pool_id = self._listener_pool_create(const.UDP, 8081)
self._test_healthmonitor_traffic(const.UDP, 8081, listener_id, pool_id)
@testtools.skipUnless(
CONF.loadbalancer_feature_enabled.l7_protocol_enabled,
'L7 protocol testing is disabled')
@decorators.idempotent_id('3558186d-6dcd-4d9d-b7f7-adc190b66149') @decorators.idempotent_id('3558186d-6dcd-4d9d-b7f7-adc190b66149')
def test_l7policies_and_l7rules(self): def test_http_l7policies_and_l7rules(self):
"""Tests sending traffic through a loadbalancer with l7rules """Tests sending traffic through a loadbalancer with l7rules
* Create an extra pool. * Create an extra pool.
@ -492,6 +500,9 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
* Create a policy/rule to reject connections. * Create a policy/rule to reject connections.
* Test traffic to ensure it goes to the correct place. * Test traffic to ensure it goes to the correct place.
""" """
LISTENER_PORT = 84
listener_id, pool_id = self._listener_pool_create(const.HTTP,
LISTENER_PORT)
protocol = const.HTTP protocol = const.HTTP
# Create a second pool # Create a second pool
@ -499,14 +510,14 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
pool_kwargs = { pool_kwargs = {
const.NAME: pool_name, const.NAME: pool_name,
const.PROTOCOL: protocol, const.PROTOCOL: protocol,
const.LB_ALGORITHM: self.lb_algorithm, const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
const.LOADBALANCER_ID: self.lb_id, const.LOADBALANCER_ID: self.lb_id,
} }
pool = self.mem_pool_client.create_pool(**pool_kwargs) pool = self.mem_pool_client.create_pool(**pool_kwargs)
pool_id = pool[const.ID] pool2_id = pool[const.ID]
self.addCleanup( self.addCleanup(
self.mem_pool_client.cleanup_pool, self.mem_pool_client.cleanup_pool,
pool_id, pool2_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id) lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer, waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
@ -518,7 +529,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
# Set up Member 1 for Webserver 1 on the default pool # Set up Member 1 for Webserver 1 on the default pool
member1_name = data_utils.rand_name("lb_member_member1-l7redirect") member1_name = data_utils.rand_name("lb_member_member1-l7redirect")
member1_kwargs = { member1_kwargs = {
const.POOL_ID: self.pool_ids[protocol], const.POOL_ID: pool_id,
const.NAME: member1_name, const.NAME: member1_name,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
const.ADDRESS: self.webserver1_ip, const.ADDRESS: self.webserver1_ip,
@ -531,7 +542,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
**member1_kwargs) **member1_kwargs)
self.addCleanup( self.addCleanup(
self.mem_member_client.cleanup_member, self.mem_member_client.cleanup_member,
member1[const.ID], pool_id=self.pool_ids[protocol], member1[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id) lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id, self.mem_lb_client.show_loadbalancer, self.lb_id,
@ -542,7 +553,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
# Set up Member 2 for Webserver 2 on the alternate pool # Set up Member 2 for Webserver 2 on the alternate pool
member2_name = data_utils.rand_name("lb_member_member2-l7redirect") member2_name = data_utils.rand_name("lb_member_member2-l7redirect")
member2_kwargs = { member2_kwargs = {
const.POOL_ID: pool_id, const.POOL_ID: pool2_id,
const.NAME: member2_name, const.NAME: member2_name,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
const.ADDRESS: self.webserver2_ip, const.ADDRESS: self.webserver2_ip,
@ -555,7 +566,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
**member2_kwargs) **member2_kwargs)
self.addCleanup( self.addCleanup(
self.mem_member_client.cleanup_member, self.mem_member_client.cleanup_member,
member2[const.ID], pool_id=self.pool_ids[protocol], member2[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id) lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id, self.mem_lb_client.show_loadbalancer, self.lb_id,
@ -567,13 +578,13 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
l7policy1_name = data_utils.rand_name("lb_member_l7policy1-l7redirect") l7policy1_name = data_utils.rand_name("lb_member_l7policy1-l7redirect")
l7policy1_description = data_utils.arbitrary_string(size=255) l7policy1_description = data_utils.arbitrary_string(size=255)
l7policy1_kwargs = { l7policy1_kwargs = {
const.LISTENER_ID: self.listener_ids[protocol], const.LISTENER_ID: listener_id,
const.NAME: l7policy1_name, const.NAME: l7policy1_name,
const.DESCRIPTION: l7policy1_description, const.DESCRIPTION: l7policy1_description,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
const.POSITION: 1, const.POSITION: 1,
const.ACTION: const.REDIRECT_TO_POOL, const.ACTION: const.REDIRECT_TO_POOL,
const.REDIRECT_POOL_ID: pool_id, const.REDIRECT_POOL_ID: pool2_id,
} }
l7policy1 = self.mem_l7policy_client.create_l7policy( l7policy1 = self.mem_l7policy_client.create_l7policy(
**l7policy1_kwargs) **l7policy1_kwargs)
@ -612,7 +623,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
l7policy2_name = data_utils.rand_name("lb_member_l7policy2-l7redirect") l7policy2_name = data_utils.rand_name("lb_member_l7policy2-l7redirect")
l7policy2_description = data_utils.arbitrary_string(size=255) l7policy2_description = data_utils.arbitrary_string(size=255)
l7policy2_kwargs = { l7policy2_kwargs = {
const.LISTENER_ID: self.listener_ids[protocol], const.LISTENER_ID: listener_id,
const.NAME: l7policy2_name, const.NAME: l7policy2_name,
const.DESCRIPTION: l7policy2_description, const.DESCRIPTION: l7policy2_description,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
@ -657,7 +668,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
l7policy3_name = data_utils.rand_name("lb_member_l7policy3-l7redirect") l7policy3_name = data_utils.rand_name("lb_member_l7policy3-l7redirect")
l7policy3_description = data_utils.arbitrary_string(size=255) l7policy3_description = data_utils.arbitrary_string(size=255)
l7policy3_kwargs = { l7policy3_kwargs = {
const.LISTENER_ID: self.listener_ids[protocol], const.LISTENER_ID: listener_id,
const.NAME: l7policy3_name, const.NAME: l7policy3_name,
const.DESCRIPTION: l7policy3_description, const.DESCRIPTION: l7policy3_description,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
@ -699,17 +710,20 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
CONF.load_balancer.build_timeout) CONF.load_balancer.build_timeout)
# Assert that normal traffic goes to pool1->member1 # Assert that normal traffic goes to pool1->member1
url_for_member1 = 'http://{}/'.format(self.lb_vip_address) url_for_member1 = 'http://{}:{}/'.format(self.lb_vip_address,
LISTENER_PORT)
self.assertConsistentResponse((200, self.webserver1_response), self.assertConsistentResponse((200, self.webserver1_response),
url_for_member1) url_for_member1)
# Assert that slow traffic goes to pool2->member2 # Assert that slow traffic goes to pool2->member2
url_for_member2 = 'http://{}/slow?delay=1s'.format(self.lb_vip_address) url_for_member2 = 'http://{}:{}/slow?delay=1s'.format(
self.lb_vip_address, LISTENER_PORT)
self.assertConsistentResponse((200, self.webserver2_response), self.assertConsistentResponse((200, self.webserver2_response),
url_for_member2) url_for_member2)
# Assert that /turtles is redirected to identity # Assert that /turtles is redirected to identity
url_for_identity = 'http://{}/turtles'.format(self.lb_vip_address) url_for_identity = 'http://{}:{}/turtles'.format(self.lb_vip_address,
LISTENER_PORT)
self.assertConsistentResponse((302, CONF.identity.uri_v3), self.assertConsistentResponse((302, CONF.identity.uri_v3),
url_for_identity, url_for_identity,
redirect=True) redirect=True)
@ -719,7 +733,9 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
url_for_member1, url_for_member1,
headers={'reject': 'true'}) headers={'reject': 'true'})
def _test_mixed_ipv4_ipv6_members_traffic(self, protocol, protocol_port): def _test_mixed_ipv4_ipv6_members_traffic(self, protocol, protocol_port,
listener_id, pool_id,
persistent=True):
"""Tests traffic through a loadbalancer with IPv4 and IPv6 members. """Tests traffic through a loadbalancer with IPv4 and IPv6 members.
* Set up members on a loadbalancer. * Set up members on a loadbalancer.
@ -729,7 +745,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
# Set up Member 1 for Webserver 1 # Set up Member 1 for Webserver 1
member1_name = data_utils.rand_name("lb_member_member1-traffic") member1_name = data_utils.rand_name("lb_member_member1-traffic")
member1_kwargs = { member1_kwargs = {
const.POOL_ID: self.pool_ids[protocol], const.POOL_ID: pool_id,
const.NAME: member1_name, const.NAME: member1_name,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
const.ADDRESS: self.webserver1_ip, const.ADDRESS: self.webserver1_ip,
@ -742,7 +758,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
**member1_kwargs) **member1_kwargs)
self.addCleanup( self.addCleanup(
self.mem_member_client.cleanup_member, self.mem_member_client.cleanup_member,
member1[const.ID], pool_id=self.pool_ids[protocol], member1[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id) lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id, self.mem_lb_client.show_loadbalancer, self.lb_id,
@ -753,7 +769,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
# Set up Member 2 for Webserver 2 # Set up Member 2 for Webserver 2
member2_name = data_utils.rand_name("lb_member_member2-traffic") member2_name = data_utils.rand_name("lb_member_member2-traffic")
member2_kwargs = { member2_kwargs = {
const.POOL_ID: self.pool_ids[protocol], const.POOL_ID: pool_id,
const.NAME: member2_name, const.NAME: member2_name,
const.ADMIN_STATE_UP: True, const.ADMIN_STATE_UP: True,
const.ADDRESS: self.webserver2_ipv6, const.ADDRESS: self.webserver2_ipv6,
@ -767,7 +783,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
**member2_kwargs) **member2_kwargs)
self.addCleanup( self.addCleanup(
self.mem_member_client.cleanup_member, self.mem_member_client.cleanup_member,
member2[const.ID], pool_id=self.pool_ids[protocol], member2[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id) lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status( waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id, self.mem_lb_client.show_loadbalancer, self.lb_id,
@ -778,15 +794,28 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
# Send some traffic # Send some traffic
self.check_members_balanced(self.lb_vip_address, self.check_members_balanced(self.lb_vip_address,
protocol_port=protocol_port, protocol_port=protocol_port,
protocol=protocol) protocol=protocol, persistent=persistent)
@testtools.skipIf(CONF.load_balancer.test_with_noop, @testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.') 'Traffic tests will not work in noop mode.')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6, @testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
'Mixed IPv4/IPv6 member test requires IPv6.') 'Mixed IPv4/IPv6 member test requires IPv6.')
@decorators.idempotent_id('20b6b671-0101-4bed-a249-9af6ee3aa6d9') @decorators.idempotent_id('20b6b671-0101-4bed-a249-9af6ee3aa6d9')
def test_mixed_ipv4_ipv6_members_traffic(self): def test_mixed_ipv4_ipv6_members_http_traffic(self):
self._test_mixed_ipv4_ipv6_members_traffic(self.protocol, 80) listener_id, pool_id = self._listener_pool_create(const.HTTP, 85)
self._test_mixed_ipv4_ipv6_members_traffic(const.HTTP, 85,
listener_id, pool_id)
@testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.')
@testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
'Mixed IPv4/IPv6 member test requires IPv6.')
@decorators.idempotent_id('c442ae84-0abc-4470-8c7e-14a07e92a6fa')
def test_mixed_ipv4_ipv6_members_tcp_traffic(self):
listener_id, pool_id = self._listener_pool_create(const.TCP, 86)
self._test_mixed_ipv4_ipv6_members_traffic(const.TCP, 86,
listener_id, pool_id,
persistent=False)
@testtools.skipIf(CONF.load_balancer.test_with_noop, @testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.') 'Traffic tests will not work in noop mode.')
@ -805,8 +834,143 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
self.api_version, '2.1'): self.api_version, '2.1'):
raise self.skipException('UDP listener support is only available ' raise self.skipException('UDP listener support is only available '
'in Octavia API version 2.1 or newer') 'in Octavia API version 2.1 or newer')
listener_id, pool_id = self._listener_pool_create(const.UDP, 8082)
self._test_mixed_ipv4_ipv6_members_traffic(const.UDP, 8082,
listener_id, pool_id)
self._test_mixed_ipv4_ipv6_members_traffic(const.UDP, 8080) @testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.')
@decorators.idempotent_id('a58063fb-b9e8-4cfc-8a8c-7b2e9e884e7a')
def test_least_connections_http_traffic(self):
listener_id, pool_id = self._listener_pool_create(
const.HTTP, 87,
pool_algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_basic_traffic(const.HTTP, 87, listener_id, pool_id)
@testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.')
@decorators.idempotent_id('e1056709-6a1a-4a15-80c2-5cbb8279f924')
def test_least_connections_tcp_traffic(self):
listener_id, pool_id = self._listener_pool_create(
const.TCP, 88, pool_algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_basic_traffic(const.TCP, 88, listener_id, pool_id,
persistent=False, delay=0.2)
@testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.')
@decorators.idempotent_id('b5285410-507c-4629-90d4-6161540033d9')
def test_least_connections_udp_traffic(self):
if not self.mem_listener_client.is_version_supported(
self.api_version, '2.1'):
raise self.skipException('UDP listener support is only available '
'in Octavia API version 2.1 or newer')
listener_id, pool_id = self._listener_pool_create(
const.UDP, 8083,
pool_algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_basic_traffic(const.UDP, 8083, listener_id, pool_id)
@testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.')
@decorators.idempotent_id('881cc3e9-a011-4043-b0e3-a6185f736053')
def test_source_ip_http_traffic(self):
listener_id, pool_id = self._listener_pool_create(
const.HTTP, 89,
pool_algorithm=const.LB_ALGORITHM_SOURCE_IP)
self._test_basic_traffic(const.HTTP, 89, listener_id, pool_id,
traffic_member_count=1, persistent=False)
@testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.')
@decorators.idempotent_id('4568db0e-4243-4191-a822-9d327a55fa64')
def test_source_ip_tcp_traffic(self):
listener_id, pool_id = self._listener_pool_create(
const.TCP, 90, pool_algorithm=const.LB_ALGORITHM_SOURCE_IP)
self._test_basic_traffic(const.TCP, 90, listener_id, pool_id,
traffic_member_count=1, persistent=False)
@testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.')
@decorators.idempotent_id('be9e6ef2-7840-47d7-9315-cdb1e897b202')
def test_source_ip_udp_traffic(self):
if not self.mem_listener_client.is_version_supported(
self.api_version, '2.1'):
raise self.skipException('UDP listener support is only available '
'in Octavia API version 2.1 or newer')
listener_id, pool_id = self._listener_pool_create(
const.UDP, 8084,
pool_algorithm=const.LB_ALGORITHM_SOURCE_IP)
self._test_basic_traffic(const.UDP, 8084, listener_id, pool_id,
traffic_member_count=1, persistent=False)
@testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.')
@decorators.idempotent_id('a446585b-5651-40ce-a4db-cb2ab4d37c03')
def test_source_ip_port_http_traffic(self):
# This is a special case as the reference driver does not support
# this test. Since it runs with not_implemented_is_error, we must
# handle this test case special.
try:
listener_id, pool_id = self._listener_pool_create(
const.HTTP, 60091,
pool_algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
self._test_basic_traffic(
const.HTTP, 60091, listener_id, pool_id,
traffic_member_count=1, persistent=False, source_port=60091)
except exceptions.NotImplemented as e:
message = ("The configured provider driver '{driver}' "
"does not support a feature required for this "
"test.".format(driver=CONF.load_balancer.provider))
if hasattr(e, 'resp_body'):
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
@testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.')
@decorators.idempotent_id('60108f30-d870-487c-ab96-8d8a9b587b94')
def test_source_ip_port_tcp_traffic(self):
# This is a special case as the reference driver does not support
# this test. Since it runs with not_implemented_is_error, we must
# handle this test case special.
try:
listener_id, pool_id = self._listener_pool_create(
const.TCP, 60092,
pool_algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
self._test_basic_traffic(
const.TCP, 60092, listener_id, pool_id, traffic_member_count=1,
persistent=False, source_port=60092)
except exceptions.NotImplemented as e:
message = ("The configured provider driver '{driver}' "
"does not support a feature required for this "
"test.".format(driver=CONF.load_balancer.provider))
if hasattr(e, 'resp_body'):
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
@testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.')
@decorators.idempotent_id('a67dfa58-6953-4a0f-8a65-3f153b254c98')
def test_source_ip_port_udp_traffic(self):
if not self.mem_listener_client.is_version_supported(
self.api_version, '2.1'):
raise self.skipException('UDP listener support is only available '
'in Octavia API version 2.1 or newer')
# This is a special case as the reference driver does not support
# this test. Since it runs with not_implemented_is_error, we must
# handle this test case special.
try:
listener_id, pool_id = self._listener_pool_create(
const.UDP, 8085,
pool_algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
self._test_basic_traffic(
const.UDP, 8085, listener_id, pool_id, traffic_member_count=1,
persistent=False, source_port=8085)
except exceptions.NotImplemented as e:
message = ("The configured provider driver '{driver}' "
"does not support a feature required for this "
"test.".format(driver=CONF.load_balancer.provider))
if hasattr(e, 'resp_body'):
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
@testtools.skipIf(CONF.load_balancer.test_with_noop, @testtools.skipIf(CONF.load_balancer.test_with_noop,
'Log offload tests will not work in noop mode.') 'Log offload tests will not work in noop mode.')
@ -814,9 +978,6 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
CONF.loadbalancer_feature_enabled.log_offload_enabled, CONF.loadbalancer_feature_enabled.log_offload_enabled,
'Skipping log offload tests because tempest configuration ' 'Skipping log offload tests because tempest configuration '
'[loadbalancer-feature-enabled] log_offload_enabled is False.') '[loadbalancer-feature-enabled] log_offload_enabled is False.')
@testtools.skipUnless(
CONF.loadbalancer_feature_enabled.l7_protocol_enabled,
'Log offload tests require l7_protocol_enabled.')
@decorators.idempotent_id('571dddd9-f5bd-404e-a799-9df7ac9e2fa9') @decorators.idempotent_id('571dddd9-f5bd-404e-a799-9df7ac9e2fa9')
def test_tenant_flow_log(self): def test_tenant_flow_log(self):
"""Tests tenant flow log offloading """Tests tenant flow log offloading
@ -898,7 +1059,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
# Make the request # Make the request
URL = 'http://{0}:{1}/{2}'.format( URL = 'http://{0}:{1}/{2}'.format(
self.lb_vip_address, protocol_port, unique_request_id) self.lb_vip_address, protocol_port, unique_request_id)
validators.validate_URL_response(URL, expected_status_code=200) self.validate_URL_response(URL, expected_status_code=200)
# We need to give the log subsystem time to commit the log # We need to give the log subsystem time to commit the log
time.sleep(CONF.load_balancer.check_interval) time.sleep(CONF.load_balancer.check_interval)
@ -942,10 +1103,68 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
self.assertTrue(fields[14].isdigit()) # processing_time self.assertTrue(fields[14].isdigit()) # processing_time
self.assertEqual('----', fields[15]) # term_state self.assertEqual('----', fields[15]) # term_state
@testtools.skipIf(CONF.load_balancer.test_with_noop, @decorators.idempotent_id('04399db0-04f0-4cb5-bb27-a12bf18bfe08')
'Traffic tests will not work in noop mode.') def test_http_LC_listener_with_allowed_cidrs(self):
self._test_listener_with_allowed_cidrs(
const.HTTP, 90, const.LB_ALGORITHM_LEAST_CONNECTIONS)
@decorators.idempotent_id('3d8d95b6-55e8-4bb9-b474-4ac35abaff22')
def test_tcp_LC_listener_with_allowed_cidrs(self):
self._test_listener_with_allowed_cidrs(
const.TCP, 91, const.LB_ALGORITHM_LEAST_CONNECTIONS, delay=0.2)
@decorators.idempotent_id('7456b558-9add-4e0e-988e-06803f8047f7')
def test_udp_LC_listener_with_allowed_cidrs(self):
self._test_listener_with_allowed_cidrs(
const.UDP, 92, const.LB_ALGORITHM_LEAST_CONNECTIONS)
@decorators.idempotent_id('13b0f2de-9934-457b-8be0-f1bffc6915a0') @decorators.idempotent_id('13b0f2de-9934-457b-8be0-f1bffc6915a0')
def test_listener_with_allowed_cidrs(self): def test_http_RR_listener_with_allowed_cidrs(self):
self._test_listener_with_allowed_cidrs(
const.HTTP, 93, const.LB_ALGORITHM_ROUND_ROBIN)
@decorators.idempotent_id('8bca1325-f894-494d-95c6-3ea4c3df6a0b')
def test_tcp_RR_listener_with_allowed_cidrs(self):
self._test_listener_with_allowed_cidrs(
const.TCP, 94, const.LB_ALGORITHM_ROUND_ROBIN)
@decorators.idempotent_id('93675cc3-e765-464b-9563-e0848dc75330')
def test_udp_RR_listener_with_allowed_cidrs(self):
self._test_listener_with_allowed_cidrs(
const.UDP, 95, const.LB_ALGORITHM_ROUND_ROBIN)
@decorators.idempotent_id('fb5f35c1-08c9-43f7-8ed1-0395a3ef4735')
def test_http_SI_listener_with_allowed_cidrs(self):
self._test_listener_with_allowed_cidrs(
const.HTTP, 96, const.LB_ALGORITHM_SOURCE_IP)
@decorators.idempotent_id('c0904c88-2479-42e2-974f-55041f30e6c5')
def test_tcp_SI_listener_with_allowed_cidrs(self):
self._test_listener_with_allowed_cidrs(
const.TCP, 97, const.LB_ALGORITHM_SOURCE_IP)
@decorators.idempotent_id('4f73bac5-2c98-45f9-8976-724c99e39979')
def test_udp_SI_listener_with_allowed_cidrs(self):
self._test_listener_with_allowed_cidrs(
const.UDP, 98, const.LB_ALGORITHM_SOURCE_IP)
@decorators.idempotent_id('d198ddc5-1bcb-4310-a1b0-fa1a6328c4e9')
def test_http_SIP_listener_with_allowed_cidrs(self):
self._test_listener_with_allowed_cidrs(
const.HTTP, 99, const.LB_ALGORITHM_SOURCE_IP_PORT)
@decorators.idempotent_id('bbb09dbb-2aad-4281-9383-4bb4ad420ee1')
def test_tcp_SIP_listener_with_allowed_cidrs(self):
self._test_listener_with_allowed_cidrs(
const.TCP, 100, const.LB_ALGORITHM_SOURCE_IP_PORT, delay=0.2)
@decorators.idempotent_id('70290a9d-0065-42ad-bb46-884a535d2da2')
def test_udp_SIP_listener_with_allowed_cidrs(self):
self._test_listener_with_allowed_cidrs(
const.UDP, 101, const.LB_ALGORITHM_SOURCE_IP_PORT, delay=0.2)
def _test_listener_with_allowed_cidrs(self, protocol, protocol_port,
algorithm, delay=None):
"""Tests traffic through a loadbalancer with allowed CIDRs set. """Tests traffic through a loadbalancer with allowed CIDRs set.
* Set up listener with allowed CIDRS (allow all) on a loadbalancer. * Set up listener with allowed CIDRS (allow all) on a loadbalancer.
@ -963,11 +1182,10 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
'or newer.') 'or newer.')
listener_name = data_utils.rand_name("lb_member_listener2_cidrs") listener_name = data_utils.rand_name("lb_member_listener2_cidrs")
listener_port = 8080
listener_kwargs = { listener_kwargs = {
const.NAME: listener_name, const.NAME: listener_name,
const.PROTOCOL: self.protocol, const.PROTOCOL: protocol,
const.PROTOCOL_PORT: listener_port, const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id, const.LOADBALANCER_ID: self.lb_id,
const.ALLOWED_CIDRS: ['0.0.0.0/0'] const.ALLOWED_CIDRS: ['0.0.0.0/0']
} }
@ -987,11 +1205,25 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
pool_name = data_utils.rand_name("lb_member_pool3_cidrs") pool_name = data_utils.rand_name("lb_member_pool3_cidrs")
pool_kwargs = { pool_kwargs = {
const.NAME: pool_name, const.NAME: pool_name,
const.PROTOCOL: self.protocol, const.PROTOCOL: protocol,
const.LB_ALGORITHM: self.lb_algorithm, const.LB_ALGORITHM: algorithm,
const.LISTENER_ID: listener_id, const.LISTENER_ID: listener_id,
} }
pool = self.mem_pool_client.create_pool(**pool_kwargs) # This is a special case as the reference driver does not support
# SOURCE-IP-PORT. Since it runs with not_implemented_is_error, we must
# handle this test case special.
try:
pool = self.mem_pool_client.create_pool(**pool_kwargs)
except exceptions.NotImplemented as e:
if algorithm != const.LB_ALGORITHM_SOURCE_IP_PORT:
raise
message = ("The configured provider driver '{driver}' "
"does not support a feature required for this "
"test.".format(driver=CONF.load_balancer.provider))
if hasattr(e, 'resp_body'):
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
pool_id = pool[const.ID] pool_id = pool[const.ID]
self.addCleanup( self.addCleanup(
self.mem_pool_client.cleanup_pool, self.mem_pool_client.cleanup_pool,
@ -1052,8 +1284,13 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
CONF.load_balancer.check_timeout) CONF.load_balancer.check_timeout)
# Send some traffic # Send some traffic
members = 2
if algorithm == const.LB_ALGORITHM_SOURCE_IP:
members = 1
self.check_members_balanced( self.check_members_balanced(
self.lb_vip_address, protocol_port=listener_port) self.lb_vip_address, protocol=protocol,
protocol_port=protocol_port, persistent=False,
traffic_member_count=members, delay=delay)
listener_kwargs = { listener_kwargs = {
const.LISTENER_ID: listener_id, const.LISTENER_ID: listener_id,
@ -1066,21 +1303,27 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
CONF.load_balancer.build_interval, CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout) CONF.load_balancer.build_timeout)
url_for_vip = 'http://{}:{}/'.format(
self.lb_vip_address, listener_port)
# NOTE: Before we start with the consistent response check, we must # NOTE: Before we start with the consistent response check, we must
# wait until Neutron completes the SG update. # wait until Neutron completes the SG update.
# See https://bugs.launchpad.net/neutron/+bug/1866353. # See https://bugs.launchpad.net/neutron/+bug/1866353.
def expect_conn_error(url): def expect_timeout_error(address, protocol, protocol_port):
try: try:
requests.Session().get(url) self.make_request(address, protocol=protocol,
except requests.exceptions.ConnectionError: protocol_port=protocol_port)
except exceptions.TimeoutException:
return True return True
return False return False
waiters.wait_until_true(expect_conn_error, url=url_for_vip) waiters.wait_until_true(
expect_timeout_error, address=self.lb_vip_address,
protocol=protocol, protocol_port=protocol_port)
# Assert that the server is consistently unavailable # Assert that the server is consistently unavailable
if protocol == const.UDP:
url_for_vip = 'udp://{}:{}/'.format(self.lb_vip_address,
protocol_port)
else:
url_for_vip = 'http://{}:{}/'.format(self.lb_vip_address,
protocol_port)
self.assertConsistentResponse( self.assertConsistentResponse(
(None, None), url_for_vip, repeat=3, conn_error=True) (None, None), url_for_vip, repeat=3, expect_connection_error=True)

View File

@ -12,17 +12,13 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import errno
import ipaddress import ipaddress
import pkg_resources import pkg_resources
import random import random
import requests
import shlex import shlex
import socket
import string import string
import subprocess import subprocess
import tempfile import tempfile
import time
from oslo_log import log as logging from oslo_log import log as logging
from oslo_utils import uuidutils from oslo_utils import uuidutils
@ -46,11 +42,8 @@ RETRY_INITIAL_DELAY = 1
RETRY_BACKOFF = 1 RETRY_BACKOFF = 1
RETRY_MAX = 5 RETRY_MAX = 5
SRC_PORT_NUMBER_MIN = 32768
SRC_PORT_NUMBER_MAX = 61000
class LoadBalancerBaseTest(validators.ValidatorsMixin, test.BaseTestCase):
class LoadBalancerBaseTest(test.BaseTestCase):
"""Base class for load balancer tests.""" """Base class for load balancer tests."""
# Setup cls.os_roles_lb_member. cls.os_primary, cls.os_roles_lb_member, # Setup cls.os_roles_lb_member. cls.os_primary, cls.os_roles_lb_member,
@ -65,6 +58,8 @@ class LoadBalancerBaseTest(test.BaseTestCase):
webserver2_response = 5 webserver2_response = 5
used_ips = [] used_ips = []
SRC_PORT_NUMBER_MIN = 32768
SRC_PORT_NUMBER_MAX = 61000
src_port_number = SRC_PORT_NUMBER_MIN src_port_number = SRC_PORT_NUMBER_MIN
@classmethod @classmethod
@ -913,231 +908,20 @@ class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
@classmethod @classmethod
def _validate_webserver(cls, ip_address, start_id): def _validate_webserver(cls, ip_address, start_id):
URL = 'http://{0}'.format(ip_address) URL = 'http://{0}'.format(ip_address)
validators.validate_URL_response(URL, expected_body=str(start_id)) cls.validate_URL_response(URL, expected_body=str(start_id))
URL = 'http://{0}:81'.format(ip_address) URL = 'http://{0}:81'.format(ip_address)
validators.validate_URL_response(URL, expected_body=str(start_id + 1)) cls.validate_URL_response(URL, expected_body=str(start_id + 1))
@classmethod @classmethod
def _validate_udp_server(cls, ip_address, start_id): def _validate_udp_server(cls, ip_address, start_id):
res = cls._udp_request(ip_address, 80) res = cls.make_udp_request(ip_address, 80)
if res != str(start_id): if res != str(start_id):
raise Exception("Response from test server doesn't match the " raise Exception("Response from test server doesn't match the "
"expected value ({0} != {1}).".format( "expected value ({0} != {1}).".format(
res, str(start_id))) res, str(start_id)))
res = cls._udp_request(ip_address, 81) res = cls.make_udp_request(ip_address, 81)
if res != str(start_id + 1): if res != str(start_id + 1):
raise Exception("Response from test server doesn't match the " raise Exception("Response from test server doesn't match the "
"expected value ({0} != {1}).".format( "expected value ({0} != {1}).".format(
res, str(start_id + 1))) res, str(start_id + 1)))
@classmethod
def _udp_request(cls, vip_address, port=80, timeout=None):
    """Send a single UDP datagram to a VIP and return the reply text.

    A class-level counter supplies an incrementing source port so that
    consecutive requests never reuse a source port (which would defeat
    round-robin distribution testing).

    :param vip_address: IPv4 or IPv6 address of the load balancer VIP.
    :param port: Destination UDP port (default 80).
    :param timeout: Optional socket timeout in seconds.
    :returns: The UTF-8 decoded response payload.
    """
    if ipaddress.ip_address(vip_address).version == 6:
        family = socket.AF_INET6
    else:
        family = socket.AF_INET

    sock = socket.socket(family, socket.SOCK_DGRAM)

    # Force the use of an incremental port number for source to avoid
    # re-use of a previous source port that will affect the round-robin
    # dispatch
    while True:
        port_number = cls.src_port_number
        cls.src_port_number += 1
        # Wrap the counter back to the bottom of the ephemeral range.
        if cls.src_port_number >= SRC_PORT_NUMBER_MAX:
            cls.src_port_number = SRC_PORT_NUMBER_MIN

        # catch and skip already used ports on the host
        try:
            sock.bind(('', port_number))
        except OSError as e:
            # if error is 'Address already in use', try next port number
            if e.errno != errno.EADDRINUSE:
                raise e
        else:
            # successfully bind the socket
            break

    server_address = (vip_address, port)
    data = b"data\n"

    if timeout is not None:
        sock.settimeout(timeout)

    # Close the socket even when send/receive raises (e.g. socket.timeout),
    # otherwise the bound source port leaks for the life of the process.
    try:
        sock.sendto(data, server_address)
        data, addr = sock.recvfrom(4096)
    finally:
        sock.close()

    return data.decode('utf-8')
def _wait_for_lb_functional(self, vip_address, traffic_member_count,
                            protocol_port, protocol, verify):
    """Poll the load balancer VIP until traffic flows from all members.

    Sends requests to the VIP until at least ``traffic_member_count``
    distinct member responses have been observed, which proves network
    connectivity between the load balancer and the members.

    :param vip_address: The load balancer VIP address.
    :param traffic_member_count: Number of distinct member responses
                                 required before returning.
    :param protocol_port: The listener port (false value omits the port).
    :param protocol: The listener protocol (HTTP, UDP, ...).
    :param verify: TLS verification flag passed through to requests.
    :raises Exception: If the member count is not reached before
                       CONF.load_balancer.build_timeout expires.
    """
    if protocol != const.UDP:
        session = requests.Session()
    start = time.time()
    response_counts = {}

    # Send requests to the load balancer until at least
    # "traffic_member_count" members have replied (ensure network
    # connectivity is functional between the load balancer and the members)
    while time.time() - start < CONF.load_balancer.build_timeout:
        try:
            if protocol != const.UDP:
                url = "{0}://{1}{2}{3}".format(
                    protocol.lower(),
                    vip_address,
                    ':' if protocol_port else '',
                    protocol_port or '')
                r = session.get(url, timeout=2, verify=verify)
                data = r.content
            else:
                data = self._udp_request(vip_address, port=protocol_port,
                                         timeout=2)
            if data in response_counts:
                response_counts[data] += 1
            else:
                response_counts[data] = 1

            if traffic_member_count == len(response_counts):
                LOG.debug('Loadbalancer response totals: %s',
                          response_counts)
                time.sleep(1)
                return
        except Exception:
            LOG.warning('Server is not passing initial traffic. Waiting.')
            time.sleep(1)

    LOG.debug('Loadbalancer response totals: %s', response_counts)
    # Include the VIP details in the failure so log triage is possible
    # without cross-referencing the test parameters.
    message = ('Server %s on port %s did not begin passing traffic within '
               'the timeout period. Failing test.' % (vip_address,
                                                      protocol_port))
    LOG.error(message)
    raise Exception(message)
def _send_lb_request(self, handler, protocol, vip_address,
                     verify, protocol_port, num=20):
    """Issue ``num`` requests against the VIP and tally the responses.

    :param handler: A requests-compatible handler (the ``requests``
                    module or a Session) used for non-UDP protocols.
    :param protocol: The listener protocol under test.
    :param vip_address: The load balancer VIP address.
    :param verify: TLS verification flag passed through to requests.
    :param protocol_port: The listener port (false value omits the port).
    :param num: How many requests to send (default 20).
    :returns: Mapping of response payload -> number of occurrences.
    :raises Exception: If any request to the VIP fails.
    """
    response_counts = {}

    for _ in range(num):
        try:
            if protocol == const.UDP:
                data = self._udp_request(vip_address, port=protocol_port,
                                         timeout=2)
            else:
                url = "{0}://{1}{2}{3}".format(
                    protocol.lower(),
                    vip_address,
                    ':' if protocol_port else '',
                    protocol_port or '')
                data = handler.get(url, timeout=2, verify=verify).content
            response_counts[data] = response_counts.get(data, 0) + 1
        except Exception:
            LOG.exception('Failed to send request to loadbalancer vip')
            raise Exception('Failed to connect to lb')
    LOG.debug('Loadbalancer response totals: %s', response_counts)
    return response_counts
def _check_members_balanced_round_robin(
        self, vip_address, traffic_member_count=2, protocol=const.HTTP,
        verify=True, protocol_port=80):
    """Validate ROUND_ROBIN: each member answers an equal share."""
    session = requests.Session()
    counts = self._send_lb_request(session, protocol, vip_address,
                                   verify, protocol_port)
    # Every expected member must have replied at least once...
    self.assertEqual(traffic_member_count, len(counts))
    # ...and each member must have received the same number of requests.
    self.assertEqual(1, len(set(counts.values())))
def _check_members_balanced_source_ip_port(
        self, vip_address, traffic_member_count=2, protocol=const.HTTP,
        verify=True, protocol_port=80):
    """Validate SOURCE_IP_PORT distribution.

    Without a shared session each request originates from a new source
    port, so every member should be hit. When connection reuse testing
    is enabled, a persistent session keeps the same source ip/port and
    only one member may answer.
    """
    counts = self._send_lb_request(requests, protocol, vip_address,
                                   verify, protocol_port)
    # Ensure the correct number of members
    self.assertEqual(traffic_member_count, len(counts))

    if CONF.load_balancer.test_reuse_connection:
        session_counts = self._send_lb_request(
            requests.Session(), protocol, vip_address,
            verify, protocol_port)
        # Ensure only one member answered
        self.assertEqual(1, len(session_counts))
def check_members_balanced(self, vip_address, traffic_member_count=2,
                           protocol=const.HTTP, verify=True,
                           protocol_port=80):
    """Check that traffic is distributed per the pool's LB algorithm."""
    # Literal IPv6 addresses must be bracketed inside URLs. UDP never
    # builds a URL, so it keeps the bare address.
    if (ipaddress.ip_address(vip_address).version == 6 and
            protocol != const.UDP):
        vip_address = '[{}]'.format(vip_address)

    self._wait_for_lb_functional(vip_address, traffic_member_count,
                                 protocol_port, protocol, verify)

    # Dispatch to the algorithm-specific validator, e.g.
    # _check_members_balanced_round_robin for ROUND_ROBIN.
    validator_name = ('_check_members_balanced_%s' %
                      self.lb_algorithm).lower()
    getattr(self, validator_name)(
        vip_address=vip_address,
        traffic_member_count=traffic_member_count,
        protocol=protocol,
        verify=verify,
        protocol_port=protocol_port)
def assertConsistentResponse(self, response, url, method='GET', repeat=10,
                             redirect=False, timeout=2,
                             conn_error=False, **kwargs):
    """Assert that a request to URL gets the expected response.

    :param response: Expected response in format (status_code, content).
    :param url: The URL to request.
    :param method: The HTTP method to use (GET, POST, PUT, etc)
    :param repeat: How many times to test the response.
    :param redirect: Is the request a redirect? If true, assume the passed
                     content should be the next URL in the chain.
    :param timeout: Optional seconds to wait for the server to send data.
    :param conn_error: Optional Expect a connection error?
    :param kwargs: Extra options (data, headers, cookies, ...) passed
                   straight through to requests.
    :return: boolean success status
    :raises: testtools.matchers.MismatchError
    """
    expected_code, expected_content = response
    session = requests.Session()
    for _ in range(repeat):
        if conn_error:
            # The server must refuse the connection on every attempt.
            self.assertRaises(
                requests.exceptions.ConnectionError, session.request,
                method, url, allow_redirects=not redirect, timeout=timeout,
                **kwargs)
            continue

        resp = session.request(method, url, allow_redirects=not redirect,
                               timeout=timeout, **kwargs)

        if expected_code:
            self.assertEqual(expected_code, resp.status_code)
        if redirect:
            self.assertTrue(resp.is_redirect)
            self.assertEqual(expected_content,
                             session.get_redirect_target(resp))
        elif expected_content:
            self.assertEqual(str(expected_content), resp.text)

View File

@ -1,6 +1,7 @@
# Copyright 2017 GoDaddy # Copyright 2017 GoDaddy
# Copyright 2017 Catalyst IT Ltd # Copyright 2017 Catalyst IT Ltd
# Copyright 2018 Rackspace US Inc. All rights reserved. # Copyright 2018 Rackspace US Inc. All rights reserved.
# Copyright 2020 Red Hat, Inc. All rights reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain # not use this file except in compliance with the License. You may obtain
@ -13,44 +14,68 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import errno
import ipaddress
import requests import requests
import socket
import time import time
from urllib.parse import urlparse
from oslo_log import log as logging from oslo_log import log as logging
from tempest import config from tempest import config
from tempest.lib import exceptions from tempest.lib import exceptions
from tempest import test
from octavia_tempest_plugin.common import constants as const
from octavia_tempest_plugin.common import requests_adapters
CONF = config.CONF CONF = config.CONF
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
def validate_URL_response(URL, expected_status_code=200, class ValidatorsMixin(test.BaseTestCase):
expected_body=None, HTTPS_verify=True,
client_cert_path=None, CA_certs_path=None, @staticmethod
request_interval=CONF.load_balancer.build_interval, def validate_URL_response(
request_timeout=CONF.load_balancer.build_timeout): URL, expected_status_code=200, requests_session=None,
"""Check a URL response (HTTP or HTTPS). expected_body=None, HTTPS_verify=True, client_cert_path=None,
CA_certs_path=None, source_port=None,
request_interval=CONF.load_balancer.build_interval,
request_timeout=CONF.load_balancer.build_timeout):
"""Check a URL response (HTTP or HTTPS).
:param URL: The URL to query.
:param expected_status_code: The expected HTTP status code.
:param requests_session: A requests session to use for the request.
If None, a new session will be created.
:param expected_body: The expected response text, None will not
compare.
:param HTTPS_verify: Should we verify the HTTPS server.
:param client_cert_path: Filesystem path to a file with the client
private key and certificate.
:param CA_certs_path: Filesystem path to a file containing CA
certificates to use for HTTPS validation.
:param source_port: If set, the request will come from this source port
number. If None, a random port will be used.
:param request_interval: Time, in seconds, to timeout a request.
:param request_timeout: The maximum time, in seconds, to attempt
requests. Failed validation of expected
results does not result in a retry.
:raises InvalidHttpSuccessCode: The expected_status_code did not match.
:raises InvalidHTTPResponseBody: The response body did not match the
expected content.
:raises TimeoutException: The request timed out.
:returns: The response data.
"""
session = requests_session
if requests_session is None:
session = requests.Session()
if source_port:
session.mount('http://',
requests_adapters.SourcePortAdapter(source_port))
session.mount('https://',
requests_adapters.SourcePortAdapter(source_port))
:param URL: The URL to query.
:param expected_status_code: The expected HTTP status code.
:param expected_body: The expected response text, None will not compare.
:param HTTPS_verify: Should we verify the HTTPS server.
:param client_cert_path: Filesystem path to a file with the client private
key and certificate.
:param CA_certs_path: Filesystem path to a file containing CA certificates
to use for HTTPS validation.
:param request_interval: Time, in seconds, to timeout a request.
:param request_timeout: The maximum time, in seconds, to attempt requests.
Failed validation of expected results does not
result in a retry.
:raises InvalidHttpSuccessCode: The expected_status_code did not match.
:raises InvalidHTTPResponseBody: The response body did not match the
expected content.
:raises TimeoutException: The request timed out.
:returns: None
"""
with requests.Session() as session:
session_kwargs = {} session_kwargs = {}
if not HTTPS_verify: if not HTTPS_verify:
session_kwargs['verify'] = False session_kwargs['verify'] = False
@ -63,25 +88,333 @@ def validate_URL_response(URL, expected_status_code=200,
while time.time() - start < request_timeout: while time.time() - start < request_timeout:
try: try:
response = session.get(URL, **session_kwargs) response = session.get(URL, **session_kwargs)
if response.status_code != expected_status_code: response_status_code = response.status_code
response_text = response.text
response.close()
if response_status_code != expected_status_code:
raise exceptions.InvalidHttpSuccessCode( raise exceptions.InvalidHttpSuccessCode(
'{0} is not the expected code {1}'.format( '{0} is not the expected code {1}'.format(
response.status_code, expected_status_code)) response_status_code, expected_status_code))
if expected_body and response.text != expected_body: if expected_body and response_text != expected_body:
details = '{} does not match expected {}'.format( details = '{} does not match expected {}'.format(
response.text, expected_body) response_text, expected_body)
raise exceptions.InvalidHTTPResponseBody( raise exceptions.InvalidHTTPResponseBody(
resp_body=details) resp_body=details)
return if requests_session is None:
session.close()
return response_text
except requests.exceptions.Timeout: except requests.exceptions.Timeout:
# Don't sleep as we have already waited the interval. # Don't sleep as we have already waited the interval.
LOG.info('Request for {} timed out. Retrying.'.format(URL)) LOG.info('Request for {} timed out. Retrying.'.format(URL))
except (exceptions.InvalidHttpSuccessCode, except (exceptions.InvalidHttpSuccessCode,
exceptions.InvalidHTTPResponseBody, exceptions.InvalidHTTPResponseBody,
requests.exceptions.SSLError): requests.exceptions.SSLError):
if requests_session is None:
session.close()
raise raise
except Exception as e: except Exception as e:
LOG.info('Validate URL got exception: {0}. ' LOG.info('Validate URL got exception: {0}. '
'Retrying.'.format(e)) 'Retrying.'.format(e))
time.sleep(request_interval) time.sleep(request_interval)
if requests_session is None:
session.close()
raise exceptions.TimeoutException() raise exceptions.TimeoutException()
@classmethod
def make_udp_request(cls, vip_address, port=80, timeout=None,
                     source_port=None):
    """Send one UDP datagram to a VIP and return the decoded reply.

    :param vip_address: IPv4 or IPv6 address to contact.
    :param port: Destination UDP port (default 80).
    :param timeout: Optional socket timeout in seconds.
    :param source_port: Optional fixed source port. If None, an
                        incrementing class-level counter supplies a
                        fresh source port for every request.
    :raises TimeoutException: If no reply arrives within ``timeout``.
    :returns: The UTF-8 decoded response payload.
    """
    if ipaddress.ip_address(vip_address).version == 6:
        family = socket.AF_INET6
    else:
        family = socket.AF_INET
    sock = socket.socket(family, socket.SOCK_DGRAM)

    # Force the use of an incremental port number for source to avoid
    # re-use of a previous source port that will affect the round-robin
    # dispatch
    while True:
        port_number = cls.src_port_number
        cls.src_port_number += 1
        # Wrap the counter back to the bottom of the ephemeral range.
        if cls.src_port_number >= cls.SRC_PORT_NUMBER_MAX:
            cls.src_port_number = cls.SRC_PORT_NUMBER_MIN

        # catch and skip already used ports on the host
        try:
            if source_port:
                sock.bind(('', source_port))
            else:
                sock.bind(('', port_number))
        except OSError as e:
            # if error is 'Address already in use', try next port number
            # If source_port is defined and already in use, a test
            # developer has made a mistake by using a duplicate source
            # port.
            if e.errno != errno.EADDRINUSE or source_port:
                raise e
        else:
            # successfully bind the socket
            break

    server_address = (vip_address, port)
    data = b"data\n"

    if timeout is not None:
        sock.settimeout(timeout)

    try:
        sock.sendto(data, server_address)
        data, addr = sock.recvfrom(4096)
    except socket.timeout:
        # Normalize the timeout exception so that UDP and other protocol
        # tests all return a common timeout exception.
        raise exceptions.TimeoutException()
    finally:
        sock.close()

    return data.decode('utf-8')
def make_request(
        self, vip_address, protocol=const.HTTP, HTTPS_verify=True,
        protocol_port=80, requests_session=None, client_cert_path=None,
        CA_certs_path=None, request_timeout=2, source_port=None):
    """Make a request to a VIP.

    Dispatches to the protocol-appropriate request helper and returns
    the response data.

    :param vip_address: The VIP address to test.
    :param protocol: The protocol to use for the test.
    :param HTTPS_verify: How to verify the TLS certificate. True: verify
                         using the system CA certificates. False: Do not
                         verify the VIP certificate. <path>: Filesytem
                         path to a CA certificate bundle file or
                         directory. For directories, the directory must
                         be processed using the c_rehash utility from
                         openssl.
    :param protocol_port: The port number to use for the test.
    :param requests_session: A requests session to use for the request.
                             If None, a new session will be created.
    :param client_cert_path: Filesystem path to a file with the client
                             private key and certificate.
    :param CA_certs_path: Filesystem path to a file containing CA
                          certificates to use for HTTPS validation.
    :param request_timeout: The maximum time, in seconds, to attempt
                            requests.
    :param source_port: If set, the request will come from this source
                        port number. If None, a random port will be used.
    :raises InvalidHttpSuccessCode: The expected_status_code did not
                                    match.
    :raises InvalidHTTPResponseBody: The response body did not match the
                                     expected content.
    :raises TimeoutException: The request timed out.
    :raises Exception: If a protocol is requested that is not implemented.
    :returns: The response data.
    """
    port_suffix = '{0}{1}'.format(':' if protocol_port else '',
                                  protocol_port or '')

    # Note: We are using HTTP as the TCP protocol check to simplify
    # the test setup. HTTP is a TCP based protocol.
    if protocol in (const.HTTP, const.TCP):
        url = "http://{0}{1}".format(vip_address, port_suffix)
        return self.validate_URL_response(
            url, HTTPS_verify=False, requests_session=requests_session,
            request_timeout=request_timeout,
            source_port=source_port)

    if protocol in (const.HTTPS, const.TERMINATED_HTTPS):
        url = "https://{0}{1}".format(vip_address, port_suffix)
        return self.validate_URL_response(
            url, HTTPS_verify=HTTPS_verify,
            requests_session=requests_session,
            client_cert_path=client_cert_path,
            CA_certs_path=CA_certs_path, source_port=source_port,
            request_timeout=request_timeout)

    if protocol == const.UDP:
        return self.make_udp_request(
            vip_address, port=protocol_port, timeout=request_timeout,
            source_port=source_port)

    message = ("Unknown protocol %s. Unable to check if the "
               "load balancer is balanced.", protocol)
    LOG.error(message)
    raise Exception(message)
def check_members_balanced(
        self, vip_address, traffic_member_count=2, protocol=const.HTTP,
        HTTPS_verify=True, protocol_port=80, persistent=True, repeat=20,
        client_cert_path=None, CA_certs_path=None, request_interval=2,
        request_timeout=10, source_port=None, delay=None):
    """Checks that members are evenly balanced behind a VIP.

    :param vip_address: The VIP address to test.
    :param traffic_member_count: The expected number of members.
    :param protocol: The protocol to use for the test.
    :param HTTPS_verify: How to verify the TLS certificate. True: verify
                         using the system CA certificates. False: Do not
                         verify the VIP certificate. <path>: Filesytem
                         path to a CA certificate bundle file or
                         directory. For directories, the directory must
                         be processed using the c_rehash utility from
                         openssl.
    :param protocol_port: The port number to use for the test.
    :param persistent: True when the test should persist cookies and use
                       the protocol keepalive mechanism with the target.
                       This may include maintaining a connection to the
                       member server across requests.
    :param repeat: The number of requests to make against the VIP.
    :param request_timeout: The maximum time, in seconds, to attempt
                            requests.
    :param client_cert_path: Filesystem path to a file with the client
                             private key and certificate.
    :param CA_certs_path: Filesystem path to a file containing CA
                          certificates to use for HTTPS validation.
    :param source_port: If set, the request will come from this source
                        port number. If None, a random port will be used.
    :param delay: The time to pause between requests in seconds, can be
                  fractional.
    """
    # Literal IPv6 addresses must be bracketed in URLs; UDP never
    # builds a URL so it keeps the bare address.
    if (ipaddress.ip_address(vip_address).version == 6 and
            protocol != const.UDP):
        vip_address = '[{}]'.format(vip_address)

    # A shared Session gives keepalive/cookie persistence; it must be
    # closed on every exit path below.
    requests_session = None
    if persistent:
        requests_session = requests.Session()

    # First confirm all members are reachable through the LB before
    # measuring the distribution.
    self._wait_for_lb_functional(
        vip_address, traffic_member_count, protocol_port, protocol,
        HTTPS_verify, requests_session=requests_session,
        source_port=source_port)

    response_counts = {}
    # Send a number requests to lb vip
    for i in range(repeat):
        try:
            data = self.make_request(
                vip_address, protocol=protocol, HTTPS_verify=HTTPS_verify,
                protocol_port=protocol_port,
                requests_session=requests_session,
                client_cert_path=client_cert_path,
                CA_certs_path=CA_certs_path, source_port=source_port,
                request_timeout=request_timeout)

            if data in response_counts:
                response_counts[data] += 1
            else:
                response_counts[data] = 1
            # Optional pacing between requests (e.g. to force distinct
            # source ports for SOURCE_IP_PORT tests).
            if delay is not None:
                time.sleep(delay)
        except Exception:
            LOG.exception('Failed to send request to loadbalancer vip')
            if persistent:
                requests_session.close()
            raise Exception('Failed to connect to lb')
    if persistent:
        requests_session.close()
    LOG.debug('Loadbalancer response totals: %s', response_counts)

    # Ensure the correct number of members responded
    self.assertEqual(traffic_member_count, len(response_counts))

    # Ensure both members got the same number of responses
    self.assertEqual(1, len(set(response_counts.values())))
def assertConsistentResponse(self, response, url, method='GET', repeat=10,
                             redirect=False, timeout=2,
                             expect_connection_error=False, **kwargs):
    """Assert that a request to URL gets the expected response.

    The URL scheme prefix selects the check: 'http'/'https' URLs use
    requests, 'udp' URLs use make_udp_request.

    :param response: Expected response in format (status_code, content).
    :param url: The URL to request.
    :param method: The HTTP method to use (GET, POST, PUT, etc)
    :param repeat: How many times to test the response.
    :param data: Optional data to send in the request.
    :param headers: Optional headers to send in the request.
    :param cookies: Optional cookies to send in the request.
    :param redirect: Is the request a redirect? If true, assume the passed
                     content should be the next URL in the chain.
    :param timeout: Optional seconds to wait for the server to send data.
    :param expect_connection_error: Should we expect a connection error
    :return: boolean success status
    :raises: testtools.matchers.MismatchError
    """
    session = requests.Session()
    response_code, response_content = response
    for i in range(repeat):
        # NOTE: 'https://...' URLs also take this branch, since they
        # share the 'http' prefix tested by startswith.
        if url.startswith(const.HTTP.lower()):
            if expect_connection_error:
                self.assertRaises(
                    requests.exceptions.ConnectionError, session.request,
                    method, url, allow_redirects=not redirect,
                    timeout=timeout, **kwargs)
                continue

            req = session.request(method, url,
                                  allow_redirects=not redirect,
                                  timeout=timeout, **kwargs)

            if response_code:
                self.assertEqual(response_code, req.status_code)
            if redirect:
                self.assertTrue(req.is_redirect)
                self.assertEqual(response_content,
                                 session.get_redirect_target(req))
            elif response_content:
                self.assertEqual(str(response_content), req.text)
        elif url.startswith(const.UDP.lower()):
            parsed_url = urlparse(url)
            if expect_connection_error:
                # UDP has no connection refusal; an absent listener
                # surfaces as a receive timeout instead.
                self.assertRaises(exceptions.TimeoutException,
                                  self.make_udp_request,
                                  parsed_url.hostname,
                                  port=parsed_url.port, timeout=timeout)
                continue

            data = self.make_udp_request(parsed_url.hostname,
                                         port=parsed_url.port,
                                         timeout=timeout)
            self.assertEqual(response_content, data)
def _wait_for_lb_functional(
        self, vip_address, traffic_member_count, protocol_port, protocol,
        HTTPS_verify, client_cert_path=None, CA_certs_path=None,
        request_interval=2, request_timeout=10, requests_session=None,
        source_port=None):
    """Poll the VIP until ``traffic_member_count`` members respond.

    Repeatedly sends requests to the load balancer until the expected
    number of distinct member responses has been observed, proving
    connectivity between the load balancer and the members.

    :raises Exception: If the members do not start answering before
                       CONF.load_balancer.build_timeout elapses.
    """
    deadline = time.time() + CONF.load_balancer.build_timeout
    seen_responses = {}

    # Send requests to the load balancer until at least
    # "traffic_member_count" members have replied (ensure network
    # connectivity is functional between the load balancer and the members)
    while time.time() < deadline:
        try:
            data = self.make_request(
                vip_address, protocol=protocol, HTTPS_verify=HTTPS_verify,
                protocol_port=protocol_port,
                client_cert_path=client_cert_path,
                CA_certs_path=CA_certs_path, source_port=source_port,
                request_timeout=request_timeout,
                requests_session=requests_session)
            seen_responses[data] = seen_responses.get(data, 0) + 1

            if len(seen_responses) == traffic_member_count:
                LOG.debug('Loadbalancer response totals: %s',
                          seen_responses)
                time.sleep(1)
                return
        except Exception:
            LOG.warning('Server is not passing initial traffic. Waiting.')
            time.sleep(1)

    LOG.debug('Loadbalancer wait for load balancer response totals: %s',
              seen_responses)
    message = ('Server %s on port %s did not begin passing traffic within '
               'the timeout period. Failing test.' % (vip_address,
                                                      protocol_port))
    LOG.error(message)
    raise Exception(message)

View File

@ -195,6 +195,8 @@
load_balancer: load_balancer:
check_interval: 1 check_interval: 1
check_timeout: 180 check_timeout: 180
loadbalancer-feature-enabled:
not_implemented_is_error: True
devstack_services: devstack_services:
neutron-qos: true neutron-qos: true
devstack_plugins: devstack_plugins:
@ -871,3 +873,33 @@
required-projects: required-projects:
- name: openstack/diskimage-builder - name: openstack/diskimage-builder
override-checkout: 2.30.0 override-checkout: 2.30.0
######### Third party jobs ##########
- job:
name: neutron-ovn-provider-v2-scenario
parent: ovn-octavia-provider-v2-dsvm-scenario
description: Runs the neutron OVN provider driver for Octavia scenario test.
voting: false
timeout: 5400
attempts: 1
tags: ovn-octavia-provider
irrelevant-files:
- ^.*\.rst$
- ^api-ref/.*$
- ^doc/.*$
- ^etc/.*$
- ^releasenotes/.*$
- ^octavia/amphorae/.*$
- ^octavia/api/drivers/amphora_driver/.*$
- ^octavia/compute/.*$
- ^octavia/controller/.*$
- ^octavia/distributor/.*$
- ^octavia/volume/.*$
- ^octavia/tests/.*$
vars:
devstack_local_conf:
test-config:
"$TEMPEST_CONFIG":
loadbalancer-feature-enabled:
not_implemented_is_error: False

View File

@ -44,6 +44,9 @@
voting: false voting: false
- octavia-v2-dsvm-cinder-amphora: - octavia-v2-dsvm-cinder-amphora:
voting: false voting: false
# Third party provider jobs
- neutron-ovn-provider-v2-scenario:
voting: false
gate: gate:
fail-fast: true fail-fast: true
queue: octavia queue: octavia