Adding 3 traffic-based test cases.

1) test_udp_update_pool_healthmonitor_listener
   Traffic should PASS after updating any of the LB's components.

2) test_healtmonitor_status_error_if_members_are_paused
   Validate that the health monitor puts the members into the ERROR
   operating status when the VMs backing the LB's members are paused.

3) test_hm_op_status_changed_as_expected_on_update
   Update the health monitor with various combinations of
   HTTP method, expected HTTP status codes and backend URL.
   Validate that the members' operating status moves to the
   appropriate state on each update.

Change-Id: Ie80378ac1a96941eefa905fd6f49c8fa7e9c3692
Arkady Shtempler 2020-09-30 18:20:03 +03:00
parent c50539cc07
commit 777b03ab3c
3 changed files with 289 additions and 48 deletions


@@ -136,6 +136,11 @@ GET = 'GET'
POST = 'POST'
PUT = 'PUT'
DELETE = 'DELETE'
HEAD = 'HEAD'
OPTIONS = 'OPTIONS'
PATCH = 'PATCH'
CONNECT = 'CONNECT'
TRACE = 'TRACE'
# HM Types
HEALTH_MONITOR_PING = 'PING'


@@ -204,6 +204,64 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
traffic_member_count=traffic_member_count, source_port=source_port,
delay=delay)
def _pool_add_healthmonitor(self, pool_id, protocol):
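"""Create a health monitor suitable for the given pool protocol.
Creates a UDP-CONNECT, TCP or HTTP health monitor depending on the
protocol, then waits for the load balancer and the health monitor to
reach the ACTIVE provisioning status.
:param pool_id: ID of the pool the health monitor is attached to.
:param protocol: Pool protocol (const.HTTP, const.TCP or const.UDP).
:return: The health monitor dict once it is ACTIVE.
"""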
hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic")
if protocol != const.HTTP:
if protocol == const.UDP:
hm_type = const.HEALTH_MONITOR_UDP_CONNECT
elif protocol == const.TCP:
hm_type = const.HEALTH_MONITOR_TCP
hm_kwargs = {
const.POOL_ID: pool_id,
const.NAME: hm_name,
const.TYPE: hm_type,
const.DELAY: 3,
const.TIMEOUT: 2,
const.MAX_RETRIES: 2,
const.MAX_RETRIES_DOWN: 2,
const.ADMIN_STATE_UP: True,
}
else:
hm_kwargs = {
const.POOL_ID: pool_id,
const.NAME: hm_name,
const.TYPE: const.HEALTH_MONITOR_HTTP,
const.DELAY: 2,
const.TIMEOUT: 2,
const.MAX_RETRIES: 2,
const.MAX_RETRIES_DOWN: 2,
const.HTTP_METHOD: const.GET,
const.URL_PATH: '/',
const.EXPECTED_CODES: '200',
const.ADMIN_STATE_UP: True,
}
hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
self.addCleanup(
self.mem_healthmonitor_client.cleanup_healthmonitor,
hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
hm = waiters.wait_for_status(
self.mem_healthmonitor_client.show_healthmonitor,
hm[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
return hm
def _pause_vm(self, vm_id, action):
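"""Pause or unpause a member VM through the admin servers client.
:param vm_id: Nova ID of the VM backing a pool member.
:param action: Either 'pause' or 'unpause'.
"""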
if action == 'pause':
return self.os_admin_servers_client.pause_server(vm_id)
elif action == 'unpause':
return self.os_admin_servers_client.unpause_server(vm_id)
else:
raise RuntimeError('Failed to {} the VM with'
' ID: {}'.format(action, vm_id))
@decorators.attr(type=['smoke', 'slow'])
@testtools.skipIf(CONF.load_balancer.test_with_noop,
'Traffic tests will not work in noop mode.')
@@ -348,54 +406,7 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
protocol=protocol, persistent=persistent)
# Create the healthmonitor
hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic")
if protocol != const.HTTP:
if protocol == const.UDP:
hm_type = const.HEALTH_MONITOR_UDP_CONNECT
elif protocol == const.TCP:
hm_type = const.HEALTH_MONITOR_TCP
hm_kwargs = {
const.POOL_ID: pool_id,
const.NAME: hm_name,
const.TYPE: hm_type,
const.DELAY: 3,
const.TIMEOUT: 2,
const.MAX_RETRIES: 2,
const.MAX_RETRIES_DOWN: 2,
const.ADMIN_STATE_UP: True,
}
else:
hm_kwargs = {
const.POOL_ID: pool_id,
const.NAME: hm_name,
const.TYPE: const.HEALTH_MONITOR_HTTP,
const.DELAY: 2,
const.TIMEOUT: 2,
const.MAX_RETRIES: 2,
const.MAX_RETRIES_DOWN: 2,
const.HTTP_METHOD: const.GET,
const.URL_PATH: '/',
const.EXPECTED_CODES: '200',
const.ADMIN_STATE_UP: True,
}
hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
self.addCleanup(
self.mem_healthmonitor_client.cleanup_healthmonitor,
hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
hm = waiters.wait_for_status(
self.mem_healthmonitor_client.show_healthmonitor,
hm[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
hm = self._pool_add_healthmonitor(pool_id, protocol)
# Wait for members to adjust to the correct OPERATING_STATUS
waiters.wait_for_status(
@@ -1391,3 +1402,194 @@ class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
listener_port), const.X_FORWARDED_PROTO: const.HTTP.lower()}
received_headers = _data_parser(data, expected_headers)
self.assertEqual(expected_headers, received_headers)
@decorators.idempotent_id('c79f2cd0-0324-11eb-bc8e-74e5f9e2a801')
def test_udp_update_pool_healthmonitor_listener(self):
"""Test scenario:
* Prerequisites:
* Create: UDP listener, pool, health monitor and validate UDP traffic.
* Test scenario:
* Update pool algorithm to "SOURCE_IP" and start sending UDP traffic.
* Expected: UDP packets are successfully received from the LB VIP.
* Update health monitor with "delay=5" and start sending UDP traffic.
* Expected: UDP packets are successfully received from the LB VIP.
* Update listener with "connection-limit=300" and start sending
* UDP traffic.
* Expected: UDP packets are successfully received from the LB VIP.
"""
if not self.mem_listener_client.is_version_supported(
self.api_version, '2.1'):
raise self.skipException('UDP listener support is only available '
'in Octavia API version 2.1 or newer')
listener_port = 103
high_src_port = 60093
listener_id, pool_id = self._listener_pool_create(
const.UDP, listener_port)
healthmonitor_id = self._pool_add_healthmonitor(
pool_id, protocol=const.UDP)[const.ID]
self._test_basic_traffic(
const.UDP, listener_port, pool_id)
# Update LB pool
self.mem_pool_client.update_pool(
pool_id=pool_id, lb_algorithm=const.LB_ALGORITHM_SOURCE_IP)
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
self.assertIsNotNone(self.make_udp_requests_with_retries(
vip_address=self.lb_vip_address, dst_port=listener_port,
number_of_retries=3, src_port=high_src_port),
'Failed - all UDP retries to the LB VIP have failed')
# Update LB healthmonitor
self.mem_healthmonitor_client.update_healthmonitor(
healthmonitor_id=healthmonitor_id, delay=5)
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
self.assertIsNotNone(self.make_udp_requests_with_retries(
vip_address=self.lb_vip_address, dst_port=listener_port,
number_of_retries=3, src_port=high_src_port),
'Failed - all UDP retries to the LB VIP have failed')
# Update LB listener
listener_kwargs = {const.LISTENER_ID: listener_id,
const.CONNECTION_LIMIT: 300}
self.mem_listener_client.update_listener(**listener_kwargs)
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
self.assertIsNotNone(self.make_udp_requests_with_retries(
vip_address=self.lb_vip_address, dst_port=listener_port,
number_of_retries=3, src_port=high_src_port),
'Failed - all UDP retries to the LB VIP have failed')
@decorators.idempotent_id('ce037ad2-089b-11eb-abec-74e5f9e2a801')
def test_healtmonitor_status_error_if_members_are_paused(self):
"""Test scenario:
* Create TCP listener, pool and TCP health monitor.
* Validate TCP traffic.
* Pause the VMs backing the pool's members.
* Validate that the health monitor puts the members into the ERROR
* operating status.
* Note: Unpause the VMs (done by cleanup at teardown).
"""
if not self.mem_listener_client.is_version_supported(
self.api_version, '2.1'):
raise self.skipException('TCP listener support is only available '
'in Octavia API version 2.1 or newer')
listener_port = 104
listener_id, pool_id = self._listener_pool_create(
const.TCP, listener_port)
self._pool_add_healthmonitor(pool_id, protocol=const.TCP)
self._test_basic_traffic(const.TCP, listener_port,
pool_id, persistent=False)
vm_ids = [self.lb_member_webserver1[const.ID],
self.lb_member_webserver2[const.ID]]
for vm_id in vm_ids:
self._pause_vm(vm_id, action='pause')
self.addCleanup(self._pause_vm, vm_id, 'unpause')
mb_ids = [mb[const.ID] for mb in
self.mem_member_client.list_members(pool_id)]
for mb_id in mb_ids:
waiters.wait_for_status(
self.mem_member_client.show_member,
mb_id, const.OPERATING_STATUS,
const.ERROR,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout,
pool_id=pool_id)
@decorators.idempotent_id('cd5aeefa-0e16-11eb-b8dc-74e5f9e2a801')
def test_hm_op_status_changed_as_expected_on_update(self):
"""Test scenario:
* Create HTTP listener, pool and HTTP health monitor.
* Update health monitor with various combinations of:
* HTTP method, expected HTTP status codes and backend URL.
* Note: see "fault_cases" and "valid_cases" lists in test's code.
* Validate that the members' operating status moves to the
* appropriate state after each particular update done within the test.
* Important: the "operating status" value is expected to alternate
* between ONLINE and ERROR on each update, otherwise we might miss
* a potential bug.
"""
if not self.mem_listener_client.is_version_supported(
self.api_version, '2.1'):
raise self.skipException('TCP listener support is only available '
'in Octavia API version 2.1 or newer')
listener_port = 105
listener_id, pool_id = self._listener_pool_create(
const.TCP, listener_port)
hm_id = self._pool_add_healthmonitor(
pool_id, protocol=const.HTTP)[const.ID]
self._test_basic_traffic(
const.HTTP, listener_port, pool_id, persistent=False)
mb_ids = [mb[const.ID] for
mb in self.mem_member_client.list_members(pool_id)]
# Create list of test cases to be covered in test
fault_cases = [
{'mthd': const.POST, 'code': '101-102', 'op_stat': const.ERROR,
'url_path': '/request?response_code=103'},
{'mthd': const.DELETE, 'code': '201-204', 'op_stat': const.ERROR,
'url_path': '/request?response_code=205'},
{'mthd': const.PUT, 'code': '301-302', 'op_stat': const.ERROR,
'url_path': '/request?response_code=303'},
{'mthd': const.HEAD, 'code': '400-404', 'op_stat': const.ERROR,
'url_path': '/request?response_code=405'},
{'mthd': const.OPTIONS, 'code': '500-504', 'op_stat': const.ERROR,
'url_path': '/request?response_code=505'},
{'mthd': const.PATCH, 'code': '201-204', 'op_stat': const.ERROR,
'url_path': '/request?response_code=205'},
{'mthd': const.CONNECT, 'code': '201-204', 'op_stat': const.ERROR,
'url_path': '/request?response_code=205'},
{'mthd': const.TRACE, 'code': '201-204', 'op_stat': const.ERROR,
'url_path': '/request?response_code=205'}]
valid_cases = [
{'mthd': const.GET, 'code': '101-102', 'op_stat': const.ONLINE,
'url_path': '/request?response_code=102'},
{'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
'url_path': '/request?response_code=202'},
{'mthd': const.GET, 'code': '301-302', 'op_stat': const.ONLINE,
'url_path': '/request?response_code=302'},
{'mthd': const.GET, 'code': '400-404', 'op_stat': const.ONLINE,
'url_path': '/request?response_code=404'},
{'mthd': const.GET, 'code': '500-504', 'op_stat': const.ONLINE,
'url_path': '/request?response_code=504'},
{'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
'url_path': '/request?response_code=204'},
{'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
'url_path': '/request?response_code=204'},
{'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
'url_path': '/request?response_code=204'}]
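# Interleave valid and fault cases so the expected operating status
# alternates between ONLINE and ERROR across consecutive updates.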
flip_flop = [v for f in zip(valid_cases, fault_cases) for v in f]
# For each test case, update HM and validate that members'
# "Operation Status" is changed to expected value.
for ff in flip_flop:
LOG.info('Testing case: {}'.format(ff))
self.mem_healthmonitor_client.update_healthmonitor(
hm_id, expected_codes=ff['code'], http_method=ff['mthd'],
url_path=ff['url_path'])
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
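# error_ok=True: ERROR is an expected operating status for the
# fault cases, so the waiter must not fail on it.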
for mb_id in mb_ids:
waiters.wait_for_status(
self.mem_member_client.show_member,
mb_id, const.OPERATING_STATUS,
ff['op_stat'],
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout,
error_ok=True, pool_id=pool_id)


@@ -421,3 +421,37 @@ class ValidatorsMixin(test.BaseTestCase):
protocol_port))
LOG.error(message)
raise Exception(message)
def make_udp_requests_with_retries(
self, vip_address, number_of_retries, dst_port,
src_port=None, socket_timeout=20):
"""Send UDP packages using retries mechanism
The delivery of data to the destination cannot be guaranteed in UDP.
In case when UDP package is getting lost and we might want to check
what could be the reason for that (Network issues or Server Side),
well need to send more packages to get into the conclusion.
:param vip_address: LB VIP address
:param number_of_retries: integer number of retries
:param dst_port: UDP server destination port
:param src_port: UDP source port to bind for UDP connection
:param socket_timeout: UDP socket timeout
:return: None if all UDP retries failed, otherwise the first
successful response data from the UDP server.
"""
retry_number = 0
received_data = None
while retry_number < number_of_retries:
LOG.info('make_udp_requests_with_retries attempt '
'number:{}'.format(retry_number))
retry_number += 1
try:
received_data = self.make_udp_request(
vip_address, dst_port, timeout=socket_timeout,
source_port=src_port)
break
except Exception as e:
LOG.warning('make_udp_requests_with_retries has failed with: '
'{}'.format(e))
return received_data
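
A minimal usage sketch of the new helper (hypothetical test method and
port; assumes a scenario test class that mixes in ValidatorsMixin and
already exposes self.lb_vip_address with a UDP listener configured):

    def test_udp_vip_reachable_sketch(self):
        # Try up to 3 UDP requests against the VIP; the helper returns
        # the first successful response data, or None when every retry
        # failed or timed out.
        data = self.make_udp_requests_with_retries(
            vip_address=self.lb_vip_address, number_of_retries=3,
            dst_port=80, src_port=None, socket_timeout=20)
        self.assertIsNotNone(
            data, 'All UDP retries to the LB VIP have failed')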