Add a scenario test for spare pool
This patch adds a scenario test for the amphora spare pool feature. It also adds new check jobs to run the test against master as well as the stable branches.

Change-Id: I87249017453628e3d2cd320a02677d81397f1b26
parent 3c0bde3d19
commit 5b963c4c21
@@ -84,6 +84,7 @@ FLAVOR_PROFILE_ID = 'flavor_profile_id'

# Other constants
ACTIVE = 'ACTIVE'
PENDING_UPDATE = 'PENDING_UPDATE'
ADMIN_STATE_UP_TRUE = 'true'
ASC = 'asc'
DELETED = 'DELETED'

@@ -185,4 +185,8 @@ LBFeatureEnabledGroup = [
               default="TCP",
               help="The type of L4 Protocol which is supported with the "
                    "provider driver."),
    cfg.BoolOpt('spare_pool_enabled',
                default=False,
                help="Whether the spare pool is available with the amphora "
                     "provider driver or not."),
]

@@ -0,0 +1,240 @@
# Copyright 2019 Red Hat Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators

from octavia_tempest_plugin.common import constants as const
from octavia_tempest_plugin.tests import test_base
from octavia_tempest_plugin.tests import waiters

CONF = config.CONF
LOG = logging.getLogger(__name__)


class SparePoolTest(test_base.LoadBalancerBaseTestWithCompute):

    @classmethod
    def skip_checks(cls):
        super(SparePoolTest, cls).skip_checks()

        if CONF.load_balancer.provider not in ['amphora', 'octavia']:
            raise cls.skipException("Amphora tests require provider 'amphora' "
                                    "or 'octavia' (alias to 'amphora', "
                                    "deprecated) set")
        if not CONF.loadbalancer_feature_enabled.spare_pool_enabled:
            raise cls.skipException('[loadbalancer-feature-enabled] '
                                    '"spare_pool_enabled" is set to False in '
                                    'the Tempest configuration. Spare pool '
                                    'tests will be skipped.')

    @classmethod
    def resource_setup(cls):
        """Setup resources needed by the tests"""
        super(SparePoolTest, cls).resource_setup()

    @decorators.idempotent_id('2ba3a2c2-de9d-4556-9535-cbe9209b4eaa')
    def test_health_manager_failover_to_spare_amp(self):
        """Tests Health Manager failover to amphora in spare pool.

        * Check amphora spare pool availability
        * Test the load balancer to make sure it is functioning
        * Delete amphora compute instance associated to load balancer
        * Validate load balancer fails over to spare amphora
        * Send traffic through load balancer
        * Validate amphora spare pool size is restored
        """

        # Check there is at least one amphora in spare pool
        spare_amps = waiters.wait_for_spare_amps(
            self.os_admin.amphora_client.list_amphorae,
            CONF.load_balancer.lb_build_interval,
            CONF.load_balancer.lb_build_timeout)

        # Setup a load balancer for the tests to use
        lb_name = data_utils.rand_name("lb_spare_pool")
        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
                     const.NAME: lb_name}
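
        # The second argument selects the VIP IP version, so this builds
        # the load balancer with an IPv4 VIP.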
        self._setup_lb_network_kwargs(lb_kwargs, 4)

        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
        self.lb_id = lb[const.ID]
        self.addClassResourceCleanup(self.mem_lb_client.cleanup_loadbalancer,
                                     self.lb_id)
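
        # When tempest reaches instances through floating IPs, attach one
        # to the VIP port and target it; otherwise target the VIP directly.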
        if CONF.validation.connect_method == 'floating':
            port_id = lb[const.VIP_PORT_ID]
            result = self.lb_mem_float_ip_client.create_floatingip(
                floating_network_id=CONF.network.public_network_id,
                port_id=port_id)
            floating_ip = result['floatingip']
            LOG.info('lb1_floating_ip: {}'.format(floating_ip))
            self.addClassResourceCleanup(
                waiters.wait_for_not_found,
                self.lb_mem_float_ip_client.delete_floatingip,
                self.lb_mem_float_ip_client.show_floatingip,
                floatingip_id=floating_ip['id'])
            self.lb_vip_address = floating_ip['floating_ip_address']
        else:
            self.lb_vip_address = lb[const.VIP_ADDRESS]

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.lb_build_interval,
                                CONF.load_balancer.lb_build_timeout)

        # Confirm the spare pool has changed since last check
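        # (building the load balancer should have consumed a spare amphora)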
        spare_amps_2 = waiters.wait_for_spare_amps(
            self.os_admin.amphora_client.list_amphorae,
            CONF.load_balancer.lb_build_interval,
            CONF.load_balancer.lb_build_timeout)
        self.assertNotEqual(spare_amps, spare_amps_2)

        listener_name = data_utils.rand_name("lb_member_listener1_spare")
        listener_kwargs = {
            const.NAME: listener_name,
            const.PROTOCOL: const.HTTP,
            const.PROTOCOL_PORT: '80',
            const.LOADBALANCER_ID: self.lb_id,
        }
        listener = self.mem_listener_client.create_listener(**listener_kwargs)
        self.listener_id = listener[const.ID]
        self.addClassResourceCleanup(
            self.mem_listener_client.cleanup_listener,
            self.listener_id,
            lb_client=self.mem_lb_client, lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        pool_name = data_utils.rand_name("lb_member_pool1-spare")
        pool_kwargs = {
            const.NAME: pool_name,
            const.PROTOCOL: const.HTTP,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LISTENER_ID: self.listener_id,
        }
        pool = self.mem_pool_client.create_pool(**pool_kwargs)
        self.pool_id = pool[const.ID]
        self.addClassResourceCleanup(
            self.mem_pool_client.cleanup_pool,
            self.pool_id,
            lb_client=self.mem_lb_client, lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Set up Member 1 for Webserver 1
        member1_name = data_utils.rand_name("lb_member_member1-spare")
        member1_kwargs = {
            const.POOL_ID: self.pool_id,
            const.NAME: member1_name,
            const.ADMIN_STATE_UP: True,
            const.ADDRESS: self.webserver1_ip,
            const.PROTOCOL_PORT: 80,
        }
        if self.lb_member_1_subnet:
            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]

        member1 = self.mem_member_client.create_member(
            **member1_kwargs)
        self.addClassResourceCleanup(
            self.mem_member_client.cleanup_member,
            member1[const.ID], pool_id=self.pool_id,
            lb_client=self.mem_lb_client, lb_id=self.lb_id)
        waiters.wait_for_status(
            self.mem_lb_client.show_loadbalancer, self.lb_id,
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)

        # Set up Member 2 for Webserver 2
        member2_name = data_utils.rand_name("lb_member_member2-spare")
        member2_kwargs = {
            const.POOL_ID: self.pool_id,
            const.NAME: member2_name,
            const.ADMIN_STATE_UP: True,
            const.ADDRESS: self.webserver2_ip,
            const.PROTOCOL_PORT: 80,
        }
        if self.lb_member_2_subnet:
            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]

        member2 = self.mem_member_client.create_member(
            **member2_kwargs)
        self.addClassResourceCleanup(
            self.mem_member_client.cleanup_member,
            member2[const.ID], pool_id=self.pool_id,
            lb_client=self.mem_lb_client, lb_id=self.lb_id)
        waiters.wait_for_status(
            self.mem_lb_client.show_loadbalancer, self.lb_id,
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)

        # Send some traffic
        self.check_members_balanced(self.lb_vip_address)

        # Check there is at least one amphora in spare pool
        spare_amps = waiters.wait_for_spare_amps(
            self.os_admin.amphora_client.list_amphorae,
            CONF.load_balancer.lb_build_interval,
            CONF.load_balancer.lb_build_timeout)

        # Delete amphora compute instance
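        # to simulate a compute failure; the health manager should fail the
        # load balancer over to an amphora taken from the spare pool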
        amp = self.os_admin.amphora_client.list_amphorae(
            query_params='{loadbalancer_id}={lb_id}'.format(
                loadbalancer_id=const.LOADBALANCER_ID, lb_id=self.lb_id))

        self.os_admin_servers_client.delete_server(amp[0][const.COMPUTE_ID])

        # Wait for the amphora failover to start
        waiters.wait_for_status(
            self.mem_lb_client.show_loadbalancer,
            self.lb_id, const.PROVISIONING_STATUS,
            const.PENDING_UPDATE, CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)

        # Wait for the load balancer to return to ACTIVE so the
        # cleanup steps will pass
        waiters.wait_for_status(
            self.mem_lb_client.show_loadbalancer,
            self.lb_id, const.PROVISIONING_STATUS,
            const.ACTIVE, CONF.load_balancer.lb_build_interval,
            CONF.load_balancer.lb_build_timeout)

        # Send some traffic
        self.check_members_balanced(self.lb_vip_address)

        # Confirm the spare pool has changed since last check
        spare_amps_2 = waiters.wait_for_spare_amps(
            self.os_admin.amphora_client.list_amphorae,
            CONF.load_balancer.lb_build_interval,
            CONF.load_balancer.lb_build_timeout)
        self.assertNotEqual(spare_amps, spare_amps_2)

        # Check there is at least one amphora in spare pool
        waiters.wait_for_spare_amps(self.os_admin.amphora_client.list_amphorae,
                                    CONF.load_balancer.lb_build_interval,
                                    CONF.load_balancer.lb_build_timeout)

@@ -127,6 +127,7 @@ class LoadBalancerBaseTest(test.BaseTestCase):
        cls.lb_admin_flavor_client = cls.os_roles_lb_admin.flavor_client
        cls.mem_flavor_client = cls.os_roles_lb_member.flavor_client
        cls.mem_provider_client = cls.os_roles_lb_member.provider_client
        cls.os_admin_servers_client = cls.os_admin.servers_client

    @classmethod
    def resource_setup(cls):

@@ -180,3 +180,31 @@ def wait_for_deleted_status_or_not_found(
            raise exceptions.TimeoutException(message)

        time.sleep(check_interval)


def wait_for_spare_amps(list_func, check_interval, check_timeout):
    """Waits for amphorae in spare pool.

    :param list_func: The tempest service client amphora list method.
                      Ex. cls.os_admin.amphora_client.list_amphorae
    :param check_interval: How often to check the status, in seconds.
    :param check_timeout: The maximum time, in seconds, to check the status.
    :raises TimeoutException: No amphora available in spare pool in the
                              check_timeout period.
    :returns: A list of amphorae in spare pool.
    """

    LOG.info('Waiting for amphorae in spare pool')
    start = int(time.time())
    while True:
        spare_amps = list_func(
            query_params='{status}={status_ready}'.format(
                status=const.STATUS, status_ready=const.STATUS_READY))
        if len(spare_amps) >= 1:
            return spare_amps
        if int(time.time()) - start >= check_timeout:
            message = ("No available amphorae in spare pool within the "
                       "required time {timeout}.".format(
                           timeout=check_timeout))
            raise exceptions.TimeoutException(message)
        time.sleep(check_interval)
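
For illustration, the new waiter only depends on a callable that accepts a
query_params string and returns a list of amphora records, so its contract can
be sketched with a stub in place of the tempest client (a minimal sketch,
assuming the plugin is importable; the stub name and record below are invented
for the example):

from octavia_tempest_plugin.tests import waiters


# Hypothetical stand-in for os_admin.amphora_client.list_amphorae; it
# ignores the status=READY filter and always reports one spare amphora.
def fake_list_amphorae(query_params=None):
    return [{'id': 'amp-1', 'status': 'READY'}]


# Returns immediately because the stub already reports a READY amphora;
# with an empty list the waiter would poll once per second and raise
# TimeoutException after ten seconds.
spare = waiters.wait_for_spare_amps(fake_list_amphorae,
                                    check_interval=1,
                                    check_timeout=10)
assert spare[0]['status'] == 'READY'
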
@@ -405,6 +405,43 @@
    parent: octavia-v2-dsvm-tls-barbican
    override-checkout: stable/queens

- job:
    name: octavia-v2-dsvm-spare-pool
    parent: octavia-v2-dsvm-scenario
    vars:
      tempest_test_regex: ^octavia_tempest_plugin.tests.spare_pool_scenario.v2
      devstack_local_conf:
        post-config:
          $OCTAVIA_CONF:
            house_keeping:
              spare_amphora_pool_size: 1
        test-config:
          "$TEMPEST_CONFIG":
            loadbalancer-feature-enabled:
              spare_pool_enabled: True

- job:
    name: octavia-v2-dsvm-py2-spare-pool
    parent: octavia-v2-dsvm-spare-pool
    vars:
      devstack_localrc:
        USE_PYTHON3: False

- job:
    name: octavia-v2-dsvm-spare-pool-stable-stein
    parent: octavia-v2-dsvm-spare-pool
    override-checkout: stable/stein

- job:
    name: octavia-v2-dsvm-py2-spare-pool-stable-rocky
    parent: octavia-v2-dsvm-py2-spare-pool
    override-checkout: stable/rocky

- job:
    name: octavia-v2-dsvm-py2-spare-pool-stable-queens
    parent: octavia-v2-dsvm-py2-spare-pool
    override-checkout: stable/queens

# Temporary transitional aliases for gates used in other repos
# Remove once octavia has transitioned job names
- job:

@@ -33,6 +33,16 @@
            voting: false
        - octavia-v2-dsvm-tls-barbican-stable-queens:
            voting: false
        - octavia-v2-dsvm-spare-pool:
            voting: false
        - octavia-v2-dsvm-py2-spare-pool:
            voting: false
        - octavia-v2-dsvm-spare-pool-stable-stein:
            voting: false
        - octavia-v2-dsvm-py2-spare-pool-stable-rocky:
            voting: false
        - octavia-v2-dsvm-py2-spare-pool-stable-queens:
            voting: false
    gate:
      queue: octavia
      jobs: