Remove Heat stacks from Octavia

Heat might not be supported in OSP18.

Following the Tobiko team's decision to remove the dependency on Heat, this
patch removes the Octavia Heat stacks and templates.

Note: the backend servers (such as the Ubuntu instances) and the subnet are
still created from Heat templates and consumed by the Octavia tests, but
after this patch no Octavia API calls are made through Heat.
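
For illustration, a minimal sketch (not part of the diff below) of how a test
is now expected to create and exercise an amphora load balancer through the
new deployer helpers instead of Heat stacks; server_stack and
other_server_stack stand for the existing Heat-based Ubuntu server fixtures:

    from tobiko.openstack import octavia

    # Deploy (or reuse) an IPv4 amphora LB with an HTTP listener, a
    # ROUND_ROBIN pool and one member per backend server stack.
    lb, listener, pool = octavia.deploy_ipv4_amphora_lb(
        servers_stacks=[server_stack, other_server_stack])

    # Send HTTP traffic to the LB VIP and verify it reaches all members.
    octavia.check_members_balanced(
        pool_id=pool.id,
        ip_address=lb.vip_address,
        lb_algorithm=pool.lb_algorithm,
        protocol=listener.protocol,
        port=listener.protocol_port)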

Change-Id: I5c335d68c093e8e5e1d247c300186859b889bd39
Author: Omer 2023-03-03 14:13:57 +01:00 (committed by Omer Schwartz)
parent 19aa88b433
commit c3cb8ce38c
17 changed files with 473 additions and 913 deletions

View File

@ -14,17 +14,15 @@
from __future__ import absolute_import
from tobiko.openstack.octavia import _amphora
from tobiko.openstack.octavia import _deployers
from tobiko.openstack.octavia import _client
from tobiko.openstack.octavia import _constants
from tobiko.openstack.octavia import _exceptions
from tobiko.openstack.octavia import _load_balancer
from tobiko.openstack.octavia import _validators
from tobiko.openstack.octavia import _waiters
AmphoraIdType = _amphora.AmphoraIdType
AmphoraType = _amphora.AmphoraType
get_amphora_id = _amphora.get_amphora_id
get_amphora = _amphora.get_amphora
get_amphora_compute_node = _amphora.get_amphora_compute_node
get_master_amphora = _amphora.get_master_amphora
list_amphorae = _amphora.list_amphorae
@ -36,16 +34,19 @@ get_octavia_client = _client.get_octavia_client
octavia_client = _client.octavia_client
OctaviaClientFixture = _client.OctaviaClientFixture
OctaviaClientType = _client.OctaviaClientType
get_member = _client.get_member
list_members = _client.list_members
LoadBalancerIdType = _load_balancer.LoadBalancerIdType
LoadBalancerType = _load_balancer.LoadBalancerType
get_load_balancer = _load_balancer.get_load_balancer
get_load_balancer_id = _load_balancer.get_load_balancer_id
find_load_balancer = _client.find_load_balancer
create_load_balancer = _client.create_load_balancer
find_listener = _client.find_listener
create_listener = _client.create_listener
find_pool = _client.find_pool
create_pool = _client.create_pool
find_member = _client.find_member
create_member = _client.create_member
# Waiters
wait_for_status = _waiters.wait_for_status
wait_for_octavia_service = _waiters.wait_for_octavia_service
# Validators
check_members_balanced = _validators.check_members_balanced
@ -63,6 +64,13 @@ PROVISIONING_STATUS = _constants.PROVISIONING_STATUS
ACTIVE = _constants.ACTIVE
ERROR = _constants.ERROR
PENDING_UPDATE = _constants.PENDING_UPDATE
PROTOCOL_HTTP = _constants.PROTOCOL_HTTP
PROTOCOL_TCP = _constants.PROTOCOL_TCP
LB_ALGORITHM_ROUND_ROBIN = _constants.LB_ALGORITHM_ROUND_ROBIN
LB_ALGORITHM_SOURCE_IP_PORT = _constants.LB_ALGORITHM_SOURCE_IP_PORT
AMPHORA_PROVIDER = _constants.AMPHORA_PROVIDER
OVN_PROVIDER = _constants.OVN_PROVIDER
OCTAVIA_PROVIDERS_NAMES = _constants.OCTAVIA_PROVIDERS_NAMES
WORKER_SERVICE = _constants.WORKER_SERVICE
HOUSEKEEPING_SERVICE = _constants.HOUSEKEEPING_SERVICE
HM_SERVICE = _constants.HM_SERVICE
@ -75,3 +83,15 @@ OCTAVIA_SERVICES = [WORKER_SERVICE, HOUSEKEEPING_SERVICE, HM_SERVICE,
API_SERVICE]
OCTAVIA_CONTAINERS = [WORKER_CONTAINER, HOUSEKEEPING_CONTAINER, HM_CONTAINER,
API_CONTAINER]
LB_AMP_NAME = _constants.LB_AMP_NAME
LISTENER_AMP_NAME = _constants.LISTENER_AMP_NAME
POOL_AMP_NAME = _constants.POOL_AMP_NAME
MEMBER_AMP_NAME_PREFIX = _constants.MEMBER_AMP_NAME_PREFIX
LB_OVN_NAME = _constants.LB_OVN_NAME
LISTENER_OVN_NAME = _constants.LISTENER_OVN_NAME
POOL_OVN_NAME = _constants.POOL_OVN_NAME
MEMBER_OVN_NAME_PREFIX = _constants.MEMBER_OVN_NAME_PREFIX
# Deployers
deploy_ipv4_amphora_lb = _deployers.deploy_ipv4_amphora_lb
deploy_ipv4_ovn_lb = _deployers.deploy_ipv4_ovn_lb

View File

@ -21,8 +21,7 @@ import tobiko
from tobiko import tripleo
from tobiko import config
from tobiko.openstack.octavia import _client
from tobiko.openstack.octavia import _load_balancer
from tobiko.openstack import nova
from tobiko.openstack import nova, openstacksdkclient
from tobiko.openstack.octavia import _validators
from tobiko.openstack import topology
from tobiko.shell import sh
@ -34,32 +33,24 @@ AmphoraType = typing.Dict[str, typing.Any]
AmphoraIdType = typing.Union[str, AmphoraType]
def get_amphora_id(amphora: AmphoraIdType) -> str:
if isinstance(amphora, str):
return amphora
else:
return amphora['id']
def get_os_conn():
"""Get openstacksdk client fixture
"""
return openstacksdkclient.openstacksdk_client()
def get_amphora(amphora: AmphoraIdType,
client: _client.OctaviaClientType = None) -> AmphoraType:
amphora_id = get_amphora_id(amphora)
return _client.octavia_client(client).amphora_show(amphora_id)['amphora']
def get_amphora(amp_id: str):
return get_os_conn().load_balancer.get_amphora(amp_id)
def list_amphorae(load_balancer: _load_balancer.LoadBalancerIdType = None,
client: _client.OctaviaClientType = None,
**params) \
-> tobiko.Selection[AmphoraType]:
if load_balancer is not None:
params['load_balancer_id'] = _load_balancer.get_load_balancer_id(
load_balancer)
amphorae = _client.octavia_client(client).amphora_list(
**params)['amphorae']
return tobiko.select(amphorae)
def list_amphorae(load_balancer_id: str = None, **params):
if load_balancer_id is not None:
params['load_balancer_id'] = load_balancer_id
return get_os_conn().load_balancer.amphorae(
loadbalancer_id=load_balancer_id)
def get_amphora_compute_node(load_balancer: _load_balancer.LoadBalancerIdType,
def get_amphora_compute_node(load_balancer_id: str,
port: int,
protocol: str,
ip_address: str,
@ -72,14 +63,14 @@ def get_amphora_compute_node(load_balancer: _load_balancer.LoadBalancerIdType,
(e.g. if the LB's topology is Active/standby), so the compute node which
hosts the master amphora will be returned.
:param load_balancer: the load balancer ID.
:param load_balancer_id: the load balancer ID.
:param port: the load balancer port.
:param protocol: the load balancer protocol.
:param ip_address: the IP address of the load balancer
:param client: the Octavia client
:return: the compute node which hosts the Amphora.
"""
amphorae = list_amphorae(load_balancer)
amphorae = list_amphorae(load_balancer_id=load_balancer_id)
amphora = get_master_amphora(amphorae=amphorae,
port=port,
protocol=protocol,
@ -90,6 +81,8 @@ def get_amphora_compute_node(load_balancer: _load_balancer.LoadBalancerIdType,
return topology.get_openstack_node(hostname=hostname)
# TODO (oschwart): use openstacksdk in this method implementation once
# openstacksdk exposes the corresponding API call
def get_amphora_stats(amphora_id, client=None):
"""
:param amphora_id: the amphora id
@ -146,8 +139,8 @@ def get_master_amphora(amphorae: typing.Iterable[AmphoraType],
def run_command_on_amphora(command: str,
lb_id: _load_balancer.LoadBalancerIdType = None,
lb_fip: str = None,
lb_id: str = None,
lb_vip: str = None,
amp_id: str = None,
sudo: bool = False) -> str:
"""
@ -155,22 +148,21 @@ def run_command_on_amphora(command: str,
:param command: The command to run on the amphora
:param lb_id: The load balancer id whose amphora should run the command
:param lb_fip: The loadbalancer floating ip
:param lb_vip: The loadbalancer VIP
:param amp_id: The single/master amphora id
:param sudo: (bool) Whether to run the command with sudo permissions
on the amphora
:return: The command output (str)
"""
# Get the master/single amphora lb_network_ip
if amp_id:
amp_lb_network_ip = get_amphora(amphora=amp_id)['lb_network_ip']
elif lb_id and lb_fip:
amp_lb_network_ip = get_amphora(amp_id)['lb_network_ip']
elif lb_id and lb_vip:
amphorae = list_amphorae(load_balancer_id=lb_id)
amphora = get_master_amphora(amphorae=amphorae,
port=80,
protocol='HTTP',
ip_address=lb_fip)
ip_address=lb_vip)
amp_lb_network_ip = amphora['lb_network_ip']
else:
raise ValueError('Either amphora id or both the loadbalancer id '
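
For reference, a hedged usage sketch of run_command_on_amphora after the
lb_fip -> lb_vip rename; lb is assumed to be an openstacksdk load balancer
object such as the one returned by the deployers added in this patch:

    from tobiko.openstack import octavia

    # Run a command on the single/master amphora of a given load balancer
    uptime = octavia.run_command_on_amphora(command='uptime',
                                            lb_id=lb.id,
                                            lb_vip=lb.vip_address)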

View File

@ -18,7 +18,7 @@ import typing
from octaviaclient.api.v2 import octavia
import tobiko
from tobiko.openstack import _client
from tobiko.openstack import _client, openstacksdkclient
from tobiko.openstack import keystone
@ -76,10 +76,47 @@ def get_octavia_client(session=None, shared=True, init_client=None,
return client.client
def get_member(pool_id: str, member_id: str, client=None):
return octavia_client(client).member_show(pool_id=pool_id,
member_id=member_id)
def list_members(pool_id: str):
os_sdk_client = openstacksdkclient.openstacksdk_client()
return os_sdk_client.load_balancer.members(pool=pool_id)
def list_members(pool_id: str, client=None):
return octavia_client(client).member_list(pool_id=pool_id)['members']
def find_load_balancer(lb_name: str):
os_sdk_client = openstacksdkclient.openstacksdk_client()
return os_sdk_client.load_balancer.find_load_balancer(lb_name)
def create_load_balancer(lb_kwargs):
os_sdk_client = openstacksdkclient.openstacksdk_client()
return os_sdk_client.load_balancer.create_load_balancer(**lb_kwargs)
def find_listener(listener_name: str):
os_sdk_client = openstacksdkclient.openstacksdk_client()
return os_sdk_client.load_balancer.find_listener(listener_name)
def create_listener(listener_kwargs):
os_sdk_client = openstacksdkclient.openstacksdk_client()
return os_sdk_client.load_balancer.create_listener(**listener_kwargs)
def find_pool(pool_name: str):
os_sdk_client = openstacksdkclient.openstacksdk_client()
return os_sdk_client.load_balancer.find_pool(pool_name)
def create_pool(pool_kwargs):
os_sdk_client = openstacksdkclient.openstacksdk_client()
return os_sdk_client.load_balancer.create_pool(**pool_kwargs)
def find_member(member_name: str, pool: str):
# Note that pool could be either id or name
os_sdk_client = openstacksdkclient.openstacksdk_client()
return os_sdk_client.load_balancer.find_member(member_name, pool)
def create_member(member_kwargs):
os_sdk_client = openstacksdkclient.openstacksdk_client()
return os_sdk_client.load_balancer.create_member(**member_kwargs)
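
A rough sketch of how the new openstacksdk-backed helpers are meant to be
used to look up or create a pool and a member; the resource names come from
_constants, while subnet_id and the member address are illustrative
placeholders:

    from tobiko.openstack import octavia

    listener = octavia.find_listener('tobiko_octavia_http_listener')

    pool = octavia.find_pool('tobiko_octavia_http_pool')
    if not pool:
        pool = octavia.create_pool({
            'listener_id': listener.id,
            'lb_algorithm': 'ROUND_ROBIN',
            'protocol': 'HTTP',
            'name': 'tobiko_octavia_http_pool',
        })

    member = octavia.find_member('tobiko_octavia_http_member0', pool.id)
    if not member:
        member = octavia.create_member({
            'address': '10.0.0.10',        # illustrative backend address
            'protocol_port': 80,
            'name': 'tobiko_octavia_http_member0',
            'subnet_id': subnet_id,        # illustrative subnet id
            'pool': pool.id,
        })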

View File

@ -21,6 +21,18 @@ ACTIVE = 'ACTIVE'
ERROR = 'ERROR'
PENDING_UPDATE = 'PENDING_UPDATE'
# Octavia protocols
PROTOCOL_HTTP = 'HTTP'
PROTOCOL_TCP = 'TCP'
# Octavia lb algorithms
LB_ALGORITHM_ROUND_ROBIN = 'ROUND_ROBIN'
LB_ALGORITHM_SOURCE_IP_PORT = 'SOURCE_IP_PORT'
# Octavia providers
AMPHORA_PROVIDER = 'amphora'
OVN_PROVIDER = 'ovn'
# Octavia services
WORKER_SERVICE = 'tripleo_octavia_worker.service'
HOUSEKEEPING_SERVICE = 'tripleo_octavia_housekeeping.service'
@ -32,3 +44,35 @@ WORKER_CONTAINER = 'octavia_worker'
HOUSEKEEPING_CONTAINER = 'octavia_housekeeping'
HM_CONTAINER = 'octavia_health_manager'
API_CONTAINER = 'octavia_api'
# Octavia amphora provider resources names
LB_AMP_NAME = 'tobiko_octavia_amphora_lb'
LISTENER_AMP_NAME = 'tobiko_octavia_http_listener'
POOL_AMP_NAME = 'tobiko_octavia_http_pool'
MEMBER_AMP_NAME_PREFIX = 'tobiko_octavia_http_member'
# Octavia ovn provider resources names
LB_OVN_NAME = 'tobiko_octavia_ovn_lb'
LISTENER_OVN_NAME = 'tobiko_octavia_tcp_listener'
POOL_OVN_NAME = 'tobiko_octavia_tcp_pool'
MEMBER_OVN_NAME_PREFIX = 'tobiko_octavia_tcp_member'
# Providers/lb-names dictionary
OCTAVIA_PROVIDERS_NAMES = {
'lb': {
AMPHORA_PROVIDER: LB_AMP_NAME,
OVN_PROVIDER: LB_OVN_NAME
},
'listener': {
AMPHORA_PROVIDER: LISTENER_AMP_NAME,
OVN_PROVIDER: LISTENER_OVN_NAME
},
'pool': {
AMPHORA_PROVIDER: POOL_AMP_NAME,
OVN_PROVIDER: POOL_OVN_NAME
},
'member': {
AMPHORA_PROVIDER: MEMBER_AMP_NAME_PREFIX,
OVN_PROVIDER: MEMBER_OVN_NAME_PREFIX
}
}
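
The dictionary above is keyed first by resource type and then by provider, so
a deployer can resolve the resource name it should look for, e.g.:

    from tobiko.openstack import octavia

    # Resolves to 'tobiko_octavia_tcp_pool'
    pool_name = octavia.OCTAVIA_PROVIDERS_NAMES['pool'][octavia.OVN_PROVIDER]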

View File

@ -0,0 +1,184 @@
# Copyright (c) 2023 Red Hat
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from oslo_log import log
import tobiko
from tobiko.openstack import octavia
from tobiko.openstack import neutron
from tobiko.openstack.octavia import _constants
LOG = log.getLogger(__name__)
def deploy_ipv4_lb(provider: str,
protocol: str,
protocol_port: int,
lb_algorithm: str,
servers_stacks=None):
"""Deploy a populated ipv4 LB
:param provider: the loadbalancer provider. For example: amphora
:param protocol: the listener & pool protocol. For example: HTTP
:param protocol_port: the load balancer protocol port
:param lb_algorithm: the pool load balancing algorithm. For example:
ROUND_ROBIN
:param servers_stacks: a list of server stacks (until we remove heat
entirely)
:return: all Octavia resources it has created (LB, listener, and pool)
"""
lb_name = octavia.OCTAVIA_PROVIDERS_NAMES['lb'][provider]
lb = octavia.find_load_balancer(lb_name)
if lb:
LOG.debug(f'Loadbalancer {lb.id} already exists. Skipping its'
' creation')
else:
try:
subnet = neutron.find_subnet('external_subnet')
except ModuleNotFoundError:
subnet = None
if subnet is None:
tobiko.skip_test('Replacing heat networking resources for '
'octavia in tobiko wasn\'t implemented yet')
lb_kwargs = {
'provider': provider,
'vip_subnet_id': subnet['id'],
'name': lb_name
}
lb = octavia.create_load_balancer(lb_kwargs)
octavia.wait_for_status(object_id=lb.id)
LOG.debug(f'Loadbalancer {lb.name} was deployed successfully '
f'with id {lb.id}')
listener_name = octavia.OCTAVIA_PROVIDERS_NAMES['listener'][provider]
listener = octavia.find_listener(listener_name)
if listener:
LOG.debug(f'Listener {listener.id} already exists. Skipping'
' its creation')
else:
listener_kwargs = {
'protocol': protocol,
'protocol_port': protocol_port,
'loadbalancer_id': lb.id,
'name': listener_name
}
listener = octavia.create_listener(listener_kwargs)
octavia.wait_for_status(object_id=lb.id)
LOG.debug(f'Listener {listener.name} was deployed '
f'successfully with id {listener.id}')
pool_name = octavia.OCTAVIA_PROVIDERS_NAMES['pool'][provider]
pool = octavia.find_pool(pool_name)
if pool:
LOG.debug(f'Pool {pool.id} already exists. Skipping its '
f'creation')
else:
pool_kwargs = {
'listener_id': listener.id,
'lb_algorithm': lb_algorithm,
'protocol': protocol,
'name': pool_name
}
pool = octavia.create_pool(pool_kwargs)
octavia.wait_for_status(object_id=lb.id)
LOG.debug(f'Pool {pool.name} was deployed successfully with'
f' id {pool.id}')
if servers_stacks:
for idx, server_stack in enumerate(servers_stacks):
member_name_prefix = octavia.OCTAVIA_PROVIDERS_NAMES['member'][
provider]
member_name = member_name_prefix + str(idx)
member = octavia.find_member(member_name=member_name,
pool=pool.id)
if member:
LOG.debug(f'Member {member.id} already exists. Skipping its '
f'creation')
else:
member_kwargs = {
'address': str(server_stack.fixed_ipv4),
'protocol_port': protocol_port,
'name': member_name,
'subnet_id': server_stack.network_stack.ipv4_subnet_id,
'pool': pool.id
}
member = octavia.create_member(member_kwargs)
octavia.wait_for_status(object_id=lb.id)
LOG.debug(f'Member {member.name} was deployed successfully '
f'with id {member.id}')
return lb, listener, pool
def deploy_ipv4_amphora_lb(protocol: str = _constants.PROTOCOL_HTTP,
protocol_port: int = 80,
lb_algorithm: str = (
_constants.LB_ALGORITHM_ROUND_ROBIN),
servers_stacks=None):
"""Deploy a populated ipv4 amphora provider LB with HTTP resources
This deployer method deploys the following resources:
* An IPv4 amphora provider LB
* An HTTP listener
* An HTTP pool with Round Robin LB algorithm
* Octavia members (one member for each Nova VM it receives from the
caller)
:param protocol: the listener & pool protocol. For example: HTTP
:param protocol_port: the load balancer protocol port
:param lb_algorithm: the pool load balancing algorithm. For example:
ROUND_ROBIN
:param servers_stacks: a list of server stacks (until we remove heat
entirely)
:return: all Octavia resources it has created (LB, listener, and pool)
"""
return deploy_ipv4_lb(provider=octavia.AMPHORA_PROVIDER,
protocol=protocol,
protocol_port=protocol_port,
lb_algorithm=lb_algorithm,
servers_stacks=servers_stacks)
def deploy_ipv4_ovn_lb(protocol: str = _constants.PROTOCOL_TCP,
protocol_port: int = 80,
lb_algorithm: str = (
_constants.LB_ALGORITHM_SOURCE_IP_PORT),
servers_stacks=None):
"""Deploy a populated ipv4 OVN provider LB with TCP resources
This deployer method deploys the following resources:
* An IPv4 ovn provider LB
* A TCP listener
* A TCP pool with the Source IP Port LB algorithm
* Octavia members (one member for each Nova VM it receives from the
caller)
:param protocol: the listener & pool protocol. For example: TCP
:param protocol_port: the load balancer protocol port
:param lb_algorithm: the pool load balancing algorithm. For example:
SOURCE_IP_PORT
:param servers_stacks: a list of server stacks (until we remove heat
entirely)
:return: all Octavia resources it has created (LB, listener, and pool)
"""
return deploy_ipv4_lb(provider=octavia.OVN_PROVIDER,
protocol=protocol,
protocol_port=protocol_port,
lb_algorithm=lb_algorithm,
servers_stacks=servers_stacks)
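
A minimal usage sketch of the OVN deployer defined above, matching how the
traffic test below calls it; server_stack and other_server_stack stand for
the existing Heat-based Ubuntu server fixtures:

    from tobiko.openstack import octavia

    # Defaults: TCP listener on port 80 with a SOURCE_IP_PORT pool
    lb, listener, pool = octavia.deploy_ipv4_ovn_lb(
        servers_stacks=[server_stack, other_server_stack])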

View File

@ -1,36 +0,0 @@
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import typing
from tobiko.openstack.octavia import _client
LoadBalancerType = typing.Dict[str, typing.Any]
LoadBalancerIdType = typing.Union[str, LoadBalancerType]
def get_load_balancer_id(load_balancer: LoadBalancerIdType) -> str:
if isinstance(load_balancer, str):
return load_balancer
else:
return load_balancer['id']
def get_load_balancer(load_balancer: LoadBalancerIdType,
client: _client.OctaviaClientType = None) \
-> LoadBalancerType:
load_balancer_id = get_load_balancer_id(load_balancer)
return _client.octavia_client(client).load_balancer_show(
load_balancer_id)

View File

@ -50,7 +50,7 @@ def check_members_balanced(ip_address: str,
' to the function.')
else: # members_count is None and pool_id is not None
members_count = len(octavia.list_members(pool_id=pool_id))
members_count = len(list(octavia.list_members(pool_id=pool_id)))
last_content = None
replies: typing.Dict[str, int] = collections.defaultdict(lambda: 0)

View File

@ -14,10 +14,13 @@
# under the License.
from __future__ import absolute_import
import typing
from oslo_log import log
import tobiko
from tobiko.openstack import octavia
from tobiko.openstack import octavia, openstacksdkclient
from tobiko.openstack.octavia import _constants
from tobiko import config
LOG = log.getLogger(__name__)
@ -25,27 +28,32 @@ LOG = log.getLogger(__name__)
CONF = config.CONF
def wait_for_status(status_key, status, get_client, object_id,
def wait_for_status(object_id: str,
status_key: str = _constants.PROVISIONING_STATUS,
status: str = _constants.ACTIVE,
get_client: typing.Callable = None,
interval: tobiko.Seconds = None,
timeout: tobiko.Seconds = None,
error_ok=False, **kwargs):
timeout: tobiko.Seconds = None, **kwargs):
"""Waits for an object to reach a specific status.
:param object_id: The id of the object to query.
:param status_key: The key of the status field in the response.
Ex. provisioning_status
:param status: The status to wait for. Ex. "ACTIVE"
:param get_client: The tobiko client get method.
Ex. _client.get_loadbalancer
:param object_id: The id of the object to query.
:param interval: How often to check the status, in seconds.
:param timeout: The maximum time, in seconds, to check the status.
:param error_ok: When true, ERROR status will not raise an exception.
:raises TimeoutException: The object did not achieve the status or ERROR in
the check_timeout period.
:raises UnexpectedStatusException: The request returned an unexpected
response code.
"""
if not get_client:
os_sdk_client = openstacksdkclient.openstacksdk_client()
get_client = os_sdk_client.load_balancer.get_load_balancer
for attempt in tobiko.retry(timeout=timeout,
interval=interval,
default_timeout=(
@ -56,13 +64,26 @@ def wait_for_status(status_key, status, get_client, object_id,
if response[status_key] == status:
return response
if response[status_key] == octavia.ERROR and not error_ok:
message = ('{name} {field} was updated to an invalid state of '
'ERROR'.format(name=get_client.__name__,
field=status_key))
raise octavia.RequestException(message)
# it will raise tobiko.RetryTimeLimitError in case of timeout
attempt.check_limits()
LOG.debug(f"Waiting for {get_client.__name__} {status_key} to get "
f"from '{response[status_key]}' to '{status}'...")
def wait_for_octavia_service(interval: tobiko.Seconds = None,
timeout: tobiko.Seconds = None):
for attempt in tobiko.retry(timeout=timeout,
interval=interval,
default_timeout=180.,
default_interval=5.):
try: # Call any Octavia API
octavia.list_amphorae()
except octavia.OctaviaClientException as ex:
LOG.debug(f"Error listing amphorae: {ex}")
if attempt.is_last:
raise
LOG.info('Waiting for the LB to become functional again...')
else:
LOG.info('Octavia service is available!')
break
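
A short sketch of how the reworked waiters are called; without get_client,
wait_for_status falls back to the openstacksdk get_load_balancer call, and lb
is assumed to be an openstacksdk load balancer object:

    from tobiko.openstack import octavia

    # Wait for the LB provisioning_status to become ACTIVE (defaults)
    octavia.wait_for_status(object_id=lb.id)

    # Wait for a PENDING_UPDATE transition with a shorter timeout
    octavia.wait_for_status(object_id=lb.id,
                            status=octavia.PENDING_UPDATE,
                            timeout=30)

    # Block until the Octavia API answers again after a service restart
    octavia.wait_for_octavia_service()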

View File

@ -83,25 +83,8 @@ AffinityServerGroupStackFixture = _nova.AffinityServerGroupStackFixture
AntiAffinityServerGroupStackFixture = _nova.AntiAffinityServerGroupStackFixture
CloudInitServerStackFixture = _nova.CloudInitServerStackFixture
# Octavia resources: Amphora provider resources
AmphoraIPv4LoadBalancerStack = _octavia.AmphoraIPv4LoadBalancerStack
AmphoraIPv6LoadBalancerStack = _octavia.AmphoraIPv6LoadBalancerStack
HttpRoundRobinAmphoraIpv4Listener = _octavia.HttpRoundRobinAmphoraIpv4Listener
HttpRoundRobinAmphoraIpv6Listener = _octavia.HttpRoundRobinAmphoraIpv6Listener
HttpLeastConnectionAmphoraIpv4Listener = (
_octavia.HttpLeastConnectionAmphoraIpv4Listener)
HttpLeastConnectionAmphoraIpv6Listener = (
_octavia.HttpLeastConnectionAmphoraIpv6Listener)
HttpSourceIpAmphoraIpv4Listener = _octavia.HttpSourceIpAmphoraIpv4Listener
HttpSourceIpAmphoraIpv6Listener = _octavia.HttpSourceIpAmphoraIpv6Listener
# Octavia resources: Ovn provider resources
OVNIPv4LoadBalancerStack = _octavia.OVNIPv4LoadBalancerStack
OVNIPv6LoadBalancerStack = _octavia.OVNIPv6LoadBalancerStack
TcpSourceIpPortOvnIpv4Listener = _octavia.TcpSourceIpPortOvnIpv4Listener
TcpSourceIpPortOvnIpv6Listener = _octavia.TcpSourceIpPortOvnIpv6Listener
# Octavia resources: backend servers
OctaviaOtherServerStackFixture = _octavia.OctaviaOtherServerStackFixture
QosNetworkStackFixture = _qos.QosNetworkStackFixture
QosPolicyStackFixture = _qos.QosPolicyStackFixture

View File

@ -15,293 +15,14 @@
# under the License.
from __future__ import absolute_import
from oslo_log import log
import tobiko
from tobiko import config
from tobiko.openstack import heat
from tobiko.openstack import octavia
from tobiko.openstack import neutron
from tobiko.openstack.stacks import _hot
from tobiko.openstack.stacks import _neutron
from tobiko.openstack.stacks import _ubuntu
from tobiko.shell import sh
CONF = config.CONF
LOG = log.getLogger(__name__)
class AmphoraIPv4LoadBalancerStack(heat.HeatStackFixture):
template = _hot.heat_template_file('octavia/load_balancer.yaml')
vip_network = tobiko.required_fixture(_neutron.NetworkStackFixture)
#: Floating IP network where the Neutron floating IP are created
@property
def floating_network(self) -> str:
return self.vip_network.floating_network
@property
def has_floating_ip(self) -> bool:
return bool(self.floating_network)
ip_version = 4
provider = 'amphora'
@property
def vip_subnet_id(self):
if self.ip_version == 4:
return self.vip_network.ipv4_subnet_id
else:
return self.vip_network.ipv6_subnet_id
def wait_for_active_loadbalancer(self,
timeout: tobiko.Seconds = None):
octavia.wait_for_status(status_key=octavia.PROVISIONING_STATUS,
status=octavia.ACTIVE,
get_client=octavia.get_load_balancer,
object_id=self.loadbalancer_id,
timeout=timeout)
def wait_for_update_loadbalancer(self,
timeout: tobiko.Seconds = None):
octavia.wait_for_status(status_key=octavia.PROVISIONING_STATUS,
status=octavia.PENDING_UPDATE,
get_client=octavia.get_load_balancer,
object_id=self.loadbalancer_id,
timeout=timeout)
def wait_for_octavia_service(self,
interval: tobiko.Seconds = None,
timeout: tobiko.Seconds = None,
client=None):
for attempt in tobiko.retry(timeout=timeout,
interval=interval,
default_timeout=180.,
default_interval=5.):
try:
octavia.list_amphorae(loadbalancer_id=self.loadbalancer_id,
client=client)
except octavia.OctaviaClientException as ex:
LOG.debug(f"Error listing amphorae: {ex}")
if attempt.is_last:
raise
LOG.info('Waiting for the LB to become functional again...')
else:
LOG.info('Octavia service is available!')
break
class AmphoraIPv6LoadBalancerStack(AmphoraIPv4LoadBalancerStack):
ip_version = 6
class OctaviaOtherServerStackFixture(_ubuntu.UbuntuServerStackFixture):
pass
class HttpRoundRobinAmphoraIpv4Listener(heat.HeatStackFixture):
template = _hot.heat_template_file('octavia/listener.yaml')
loadbalancer = tobiko.required_fixture(
AmphoraIPv4LoadBalancerStack)
lb_port = 80
lb_protocol = 'HTTP'
@property
def loadbalancer_id(self):
return self.loadbalancer.loadbalancer_id
@property
def loadbalancer_provider(self):
return self.loadbalancer.provider
# Pool attributes
pool_protocol = 'HTTP'
lb_algorithm = 'ROUND_ROBIN'
# healthmonitor attributes
hm_type = 'HTTP'
hm_delay = 3
hm_max_retries = 4
hm_timeout = 3
#: whenever to create the health monitor
has_monitor = True
@property
def listener_id(self):
return self.listener.listener_id
def wait_for_active_members(self):
"""Wait for all pool members to be active"""
for member in octavia.list_members(pool_id=self.pool_id):
self.wait_for_active_member(pool_id=self.pool_id,
member_id=member['id'])
def wait_for_active_member(self, pool_id, member_id, **kwargs):
"""Wait for the member to be active
Waits for the member to have an ACTIVE provisioning status.
:param member_id: the member id.
:param pool_id: the pool id.
"""
octavia.wait_for_status(status_key=octavia.PROVISIONING_STATUS,
status=octavia.ACTIVE,
get_client=octavia.get_member,
object_id=pool_id,
member_id=member_id, **kwargs)
def wait_for_members_to_be_reachable(self,
interval: tobiko.Seconds = None,
timeout: tobiko.Seconds = None):
members = [self.server_stack, self.other_server_stack]
if len(members) < 1:
return
# Wait for members to be reachable from localhost
last_reached_id = 0
for attempt in tobiko.retry(
timeout=timeout,
interval=interval,
default_interval=5.,
default_timeout=members[0].wait_timeout):
try:
for member in members[last_reached_id:]:
octavia.check_members_balanced(
members_count=1,
ip_address=member.ip_address,
protocol=self.lb_protocol,
port=self.lb_port,
requests_count=1)
last_reached_id += 1 # prevent retrying same member again
except sh.ShellCommandFailed:
if attempt.is_last:
raise
LOG.info(
"Waiting for members to have HTTP service available...")
continue
else:
break
else:
raise RuntimeError("Members couldn't be reached!")
# Members attributes
server_stack = tobiko.required_fixture(_ubuntu.UbuntuServerStackFixture)
other_server_stack = tobiko.required_fixture(
OctaviaOtherServerStackFixture)
application_port = 80
ip_version = 4
@property
def pool_id(self):
return self.pool.pool_id
@property
def subnet_id(self):
network_stack = self.server_stack.network_stack
if self.ip_version == 4:
return network_stack.ipv4_subnet_id
else:
return network_stack.ipv6_subnet_id
@property
def member_address(self) -> str:
return self.get_member_address(self.server_stack)
@property
def other_member_address(self) -> str:
return self.get_member_address(self.other_server_stack)
def get_member_address(self, server_stack):
return str(server_stack.find_fixed_ip(ip_version=self.ip_version))
@property
def amphora(self) -> octavia.AmphoraType:
return octavia.get_master_amphora(
amphorae=octavia.list_amphorae(
self.loadbalancer.loadbalancer_id),
ip_address=self.loadbalancer.floating_ip_address,
port=self.lb_port,
protocol=self.lb_protocol)
@property
def amphora_mgmt_port(self) -> neutron.PortType:
""" Get amphora's management network's port
Attaching a floating ip to the amphora's management port will allow
SSHing the amphora.
Disabling the amphora's management port will cause a failover.
"""
# Calling self.amphora in a loop would decrease performance
amphora = self.amphora
for port in neutron.list_ports():
for fixed_ip in port['fixed_ips']:
if fixed_ip['ip_address'] == amphora['lb_network_ip']:
return port
# This should never happen
raise octavia.AmphoraMgmtPortNotFound(
reason='Could not find the Network Managment Port of'
' amphora {amphora}'.
format(amphora=amphora['id']))
class HttpRoundRobinAmphoraIpv6Listener(HttpRoundRobinAmphoraIpv4Listener):
ip_version = 6
class HttpLeastConnectionAmphoraIpv4Listener(
HttpRoundRobinAmphoraIpv4Listener):
lb_algorithm = 'LEAST_CONNECTIONS'
class HttpLeastConnectionAmphoraIpv6Listener(
HttpLeastConnectionAmphoraIpv4Listener):
ip_version = 6
class HttpSourceIpAmphoraIpv4Listener(HttpRoundRobinAmphoraIpv4Listener):
lb_algorithm = 'SOURCE_IP'
class HttpSourceIpAmphoraIpv6Listener(HttpSourceIpAmphoraIpv4Listener):
ip_version = 6
lb_protocol = 'TCP'
# OVN provider stack fixtures
class OVNIPv4LoadBalancerStack(AmphoraIPv4LoadBalancerStack):
provider = 'ovn'
class OVNIPv6LoadBalancerStack(OVNIPv4LoadBalancerStack):
ip_version = 6
class TcpSourceIpPortOvnIpv4Listener(HttpRoundRobinAmphoraIpv4Listener):
loadbalancer = tobiko.required_fixture(OVNIPv4LoadBalancerStack)
lb_protocol = 'TCP'
hm_type = 'TCP'
lb_algorithm = 'SOURCE_IP_PORT'
pool_protocol = 'TCP'
class TcpSourceIpPortOvnIpv6Listener(TcpSourceIpPortOvnIpv4Listener):
ip_version = 6

View File

@ -1,148 +0,0 @@
heat_template_version: newton
description: A Listener for a Load Balancer
parameters:
lb_port:
type: number
default: 80
description: Port used by the listener
lb_protocol:
type: string
default: HTTP
description: Public protocol exposed by the listener
loadbalancer_id:
type: string
description: ID of the load balancer
# Pool attributes
lb_algorithm:
type: string
default: ROUND_ROBIN
description: Load balancing algorithm
pool_protocol:
type: string
default: HTTP
description: Protocol used by the pool members
# Health monitor parameters
has_monitor:
type: boolean
description: whenever to create the health monitor
default: true
hm_delay:
type: number
default: 10
description: >
The minimum time in seconds between regular connections of the
member.
hm_max_retries:
type: number
default: 4
description: >
Number of permissible connection failures before changing the member
status to INACTIVE.
hm_timeout:
type: number
default: 3
description: >
Maximum number of seconds for a monitor to wait for a connection
to be established before it times out.
hm_type:
type: string
default: HTTP
description: Type of health-monitor
# Member attributes
member_address:
type: string
description: The IP address of the member
other_member_address:
type: string
description: The IP address of the member
subnet_id:
type: string
description: the ID of the subnet used by member
constraints:
- custom_constraint: neutron.subnet
application_port:
type: number
default: 80
description: The port number of the member's application
conditions:
has_monitor:
get_param: has_monitor
resources:
listener:
type: OS::Octavia::Listener
properties:
loadbalancer: { get_param: loadbalancer_id }
protocol: { get_param: lb_protocol }
protocol_port: { get_param: lb_port }
pool:
type: OS::Octavia::Pool
properties:
lb_algorithm: { get_param: lb_algorithm }
protocol: { get_param: pool_protocol }
listener: { get_resource: listener }
monitor:
type: OS::Octavia::HealthMonitor
condition: has_monitor
properties:
delay: { get_param: hm_delay }
type: { get_param: hm_type }
timeout: { get_param: hm_timeout }
max_retries: { get_param: hm_max_retries }
pool: { get_resource: pool }
member:
type: OS::Octavia::PoolMember
properties:
pool: { get_resource: pool }
address: { get_param: member_address }
subnet: { get_param: subnet_id }
protocol_port: { get_param: application_port }
other_member:
type: OS::Octavia::PoolMember
properties:
pool: { get_resource: pool }
address: { get_param: other_member_address }
subnet: { get_param: subnet_id }
protocol_port: { get_param: application_port }
outputs:
listener_id:
description: Listener ID
value: { get_resource: listener }
pool_id:
description: Pool ID
value: { get_resource: pool }
monitor_id:
description: Healthmonitor ID
value: { get_resource: monitor }
condition: has_monitor
member_id:
description: member ID
value: { get_resource: member }
other_member_id:
description: member ID
value: { get_resource: other_member }

View File

@ -1,61 +0,0 @@
heat_template_version: newton
description: A Load Balancer
parameters:
provider:
type: string
description: The provider type (amphora,ovn, etc.)
vip_subnet_id:
type: string
description: ID of the load balancer public subnet
constraints:
- custom_constraint: neutron.subnet
has_floating_ip:
type: boolean
description: Whenever server has floating IP associated
default: false
floating_network:
type: string
description: |
Public network for which floating IP addresses will be allocated
constraints:
- custom_constraint: neutron.network
conditions:
has_floating_ip:
get_param: has_floating_ip
resources:
loadbalancer:
type: OS::Octavia::LoadBalancer
properties:
vip_subnet: { get_param: vip_subnet_id }
provider: { get_param: provider }
floating_ip:
type: OS::Neutron::FloatingIP
description: Floating IP address to be connected to the load balancer
condition: has_floating_ip
properties:
floating_network: {get_param: floating_network}
port_id: {get_attr: [loadbalancer, vip_port_id]}
outputs:
loadbalancer_id:
description: Load Balancer ID
value: { get_resource: loadbalancer }
loadbalancer_vip:
description: IP address of the load balancer's VIP port
value: { get_attr: [ loadbalancer, vip_address ] }
floating_ip_address:
description: Floating IP address attached to VIP address
value: { get_attr: [ floating_ip, floating_ip_address ] }
condition: has_floating_ip

View File

@ -1,37 +0,0 @@
heat_template_version: 2015-10-15
description: A Load Balancer Member
parameters:
pool_id:
type: string
description: The ID of the load balancer's pool
member_address:
type: string
description: The IP address of the member
subnet_id:
type: string
description: the ID of the subnet used by member
constraints:
- custom_constraint: neutron.subnet
application_port:
type: number
default: 80
description: The port number of the member's application
resources:
member:
type: OS::Octavia::PoolMember
properties:
pool: { get_param: pool_id }
address: { get_param: member_address }
subnet: { get_param: subnet_id }
protocol_port: { get_param: application_port }
outputs:
member_id:
description: member ID
value: { get_resource: member }

View File

@ -1,82 +0,0 @@
heat_template_version: newton
description: A pool for a Load Balancer
parameters:
lb_algorithm:
type: string
default: ROUND_ROBIN
description: Load balancing algorithm
pool_protocol:
type: string
default: HTTP
description: Protocol used by the pool members
listener_id:
type: string
description: ID of the listener
# Health monitor parameters
has_monitor:
type: boolean
description: whenever to create the health monitor
hm_delay:
type: number
default: 10
description: >
The minimum time in seconds between regular connections of the
member.
hm_max_retries:
type: number
default: 4
description: >
Number of permissible connection failures before changing the member
status to INACTIVE.
hm_timeout:
type: number
default: 3
description: >
Maximum number of seconds for a monitor to wait for a connection
to be established before it times out.
hm_type:
type: string
default: HTTP
description: Type of health-monitor
conditions:
has_monitor:
get_param: has_monitor
resources:
pool:
type: OS::Octavia::Pool
properties:
lb_algorithm: { get_param: lb_algorithm }
protocol: { get_param: pool_protocol }
listener: { get_param: listener_id }
monitor:
type: OS::Octavia::HealthMonitor
condition: has_monitor
properties:
delay: { get_param: hm_delay }
type: { get_param: hm_type }
timeout: { get_param: hm_timeout }
max_retries: { get_param: hm_max_retries }
pool: { get_resource: pool }
outputs:
pool_id:
description: Pool ID
value: { get_resource: pool }
monitor_id:
description: Healthmonitor ID
value: { get_resource: monitor }
condition: has_monitor

View File

@ -21,7 +21,6 @@ import tobiko
from tobiko.openstack import keystone
from tobiko.openstack import octavia
from tobiko.openstack import stacks
from tobiko.shell import ssh
from tobiko.shell import sh
from tobiko import tripleo
@ -36,63 +35,57 @@ class OctaviaBasicFaultTest(testtools.TestCase):
Create a load balancer with 2 members that run a server application,
Create a client that is connected to the load balancer VIP port,
Generate network traffic from the client to the load balanacer VIP.
Generate network traffic from the client to the load balancer VIP.
Restart the amphora's compute node to create a failover.
Reach the members to make sure they are ready to be checked.
Generate network traffic again to verify Octavia functionality.
"""
loadbalancer_stack = tobiko.required_fixture(
stacks.AmphoraIPv4LoadBalancerStack)
listener_stack = tobiko.required_fixture(
stacks.HttpRoundRobinAmphoraIpv4Listener)
lb = None
listener = None
pool = None
server_stack = tobiko.required_fixture(
stacks.UbuntuServerStackFixture)
other_server_stack = tobiko.required_fixture(
stacks.OctaviaOtherServerStackFixture)
def setUp(self):
# pylint: disable=no-member
super(OctaviaBasicFaultTest, self).setUp()
# Wait for Octavia objects to be active
LOG.info('Waiting for member '
f'{self.listener_stack.server_stack.stack_name} and '
f'for member '
f'{self.listener_stack.other_server_stack.stack_name} '
f'to be created...')
self.listener_stack.wait_for_active_members()
self.lb, self.listener, self.pool = octavia.deploy_ipv4_amphora_lb(
servers_stacks=[self.server_stack, self.other_server_stack]
)
self.loadbalancer_stack.wait_for_octavia_service()
self.listener_stack.wait_for_members_to_be_reachable()
self._send_http_traffic()
def _send_http_traffic(self):
# For 5 minutes we ignore specific exceptions as we know
# that Octavia resources are being provisioned
for attempt in tobiko.retry(timeout=300.):
try:
octavia.check_members_balanced(
pool_id=self.listener_stack.pool_id,
ip_address=self.loadbalancer_stack.floating_ip_address,
lb_algorithm=self.listener_stack.lb_algorithm,
protocol=self.listener_stack.lb_protocol,
port=self.listener_stack.lb_port)
pool_id=self.pool.id,
ip_address=self.lb.vip_address,
lb_algorithm=self.pool.lb_algorithm,
protocol=self.listener.protocol,
port=self.listener.protocol_port)
break
except (octavia.RoundRobinException,
octavia.TrafficTimeoutError,
sh.ShellCommandFailed):
LOG.exception(f"Traffic didn't reach all members after "
f"#{attempt.number} attempts and "
f"{attempt.elapsed_time} seconds")
LOG.exception(
f"Traffic didn't reach all members after "
f"#{attempt.number} attempts and "
f"{attempt.elapsed_time} seconds")
if attempt.is_last:
raise
@property
def amphora_ssh_client(self) -> ssh.SSHClientType:
return self.listener_stack.amphora_ssh_client
def test_reboot_amphora_compute_node(self):
amphora_compute_host = octavia.get_amphora_compute_node(
load_balancer=self.loadbalancer_stack.loadbalancer_id,
port=self.listener_stack.lb_port,
protocol=self.listener_stack.lb_protocol,
ip_address=self.loadbalancer_stack.floating_ip_address)
load_balancer_id=self.lb.id,
port=self.listener.protocol_port,
protocol=self.listener.protocol,
ip_address=self.lb.vip_address)
LOG.debug('Rebooting compute node...')
@ -103,42 +96,25 @@ class OctaviaBasicFaultTest(testtools.TestCase):
# Wait for the LB to be updated
try:
self.loadbalancer_stack.wait_for_update_loadbalancer(timeout=30)
octavia.wait_for_status(object_id=self.lb.id)
# The reboot_overcloud_node function restarts other running nova
# vms/backend servers after the compute node reboot by default.
# Those restart operations may take longer than the LB transitions into
# PENDING_UPDATE and then into ACTIVE altogether. So if the restarted
# vms will finish their restart process after the LB reaches ACTIVE
# status, the lb will never reach PENDING_UPDATE
except tobiko.RetryTimeLimitError:
LOG.info('The restarted servers reached ACTIVE status after the'
' LB finished its update process, hence no exception is'
' being raised even though the update timeout was'
' reached.')
LOG.info('The restarted servers/backend members reached ACTIVE '
'status after the LB finished its update process, hence '
'no exception is being raised even though the update '
'timeout was reached.')
self.loadbalancer_stack.wait_for_active_loadbalancer()
octavia.wait_for_status(object_id=self.lb.id)
LOG.debug(f'Load Balancer {self.loadbalancer_stack.loadbalancer_id} is'
f' ACTIVE')
LOG.debug(f'Load Balancer {self.lb.id} is ACTIVE')
# Wait for Octavia objects' provisioning status to be ACTIVE
self.listener_stack.wait_for_active_members()
# Verify Octavia functionality
# For 5 minutes we ignore specific exceptions as we know
# that Octavia resources are being provisioned/migrated
for attempt in tobiko.retry(timeout=300.):
try:
octavia.check_members_balanced(
pool_id=self.listener_stack.pool_id,
ip_address=self.loadbalancer_stack.floating_ip_address,
lb_algorithm=self.listener_stack.lb_algorithm,
protocol=self.listener_stack.lb_protocol,
port=self.listener_stack.lb_port)
break
except (octavia.RoundRobinException,
octavia.TrafficTimeoutError,
sh.ShellCommandFailed):
LOG.exception(f"Traffic didn't reach all members after "
f"#{attempt.number} attempts and "
f"{attempt.elapsed_time} seconds")
if attempt.is_last:
raise
self._send_http_traffic()
def test_kill_amphora_agent(self):
"""Kill the MASTER amphora agent
@ -156,14 +132,14 @@ class OctaviaBasicFaultTest(testtools.TestCase):
"ps -ef | awk '/amphora/{print $2}' | head -n 1")
amp_agent_pid = octavia.run_command_on_amphora(
command=amp_agent_pid_command,
lb_id=self.loadbalancer_stack.loadbalancer_id,
lb_fip=self.loadbalancer_stack.floating_ip_address)
lb_id=self.lb.id,
lb_vip=self.lb.vip_address)
LOG.info(f'The amp_agent_pid is {amp_agent_pid}')
octavia.run_command_on_amphora(
command=f'kill -9 {amp_agent_pid}',
lb_id=self.loadbalancer_stack.loadbalancer_id,
lb_fip=self.loadbalancer_stack.floating_ip_address,
lb_id=self.lb.id,
lb_vip=self.lb.vip_address,
sudo=True)
self._wait_for_failover_and_test_functionality()
@ -183,8 +159,8 @@ class OctaviaBasicFaultTest(testtools.TestCase):
octavia.run_command_on_amphora(
command=stop_keepalived_cmd,
lb_id=self.loadbalancer_stack.loadbalancer_id,
lb_fip=self.loadbalancer_stack.floating_ip_address,
lb_id=self.lb.id,
lb_vip=self.lb.vip_address,
sudo=True)
self._wait_for_failover_and_test_functionality()
@ -205,14 +181,14 @@ class OctaviaBasicFaultTest(testtools.TestCase):
"systemctl list-units | awk '/haproxy-/{print $1}'")
amp_haproxy_unit = octavia.run_command_on_amphora(
command=amp_haproxy_unit_command,
lb_id=self.loadbalancer_stack.loadbalancer_id,
lb_fip=self.loadbalancer_stack.floating_ip_address)
lb_id=self.lb.id,
lb_vip=self.lb.vip_address)
LOG.info(f'The amp_haproxy_unit is {amp_haproxy_unit}')
octavia.run_command_on_amphora(
command=f'systemctl stop {amp_haproxy_unit}',
lb_id=self.loadbalancer_stack.loadbalancer_id,
lb_fip=self.loadbalancer_stack.floating_ip_address,
lb_id=self.lb.id,
lb_vip=self.lb.vip_address,
sudo=True)
self._wait_for_failover_and_test_functionality()
@ -220,40 +196,21 @@ class OctaviaBasicFaultTest(testtools.TestCase):
def _skip_if_not_active_standby(self):
"""Skip the test if Octavia doesn't use Active/standby topology
"""
if len(octavia.list_amphorae(
self.loadbalancer_stack.loadbalancer_id)) < 2:
skipping_stmt = 'Skipping the test as it requires ' \
'Active/standby topology.'
if len(list(octavia.list_amphorae(load_balancer_id=self.lb.id))) < 2:
skipping_stmt = ('Skipping the test as it requires '
'Active/standby topology.')
LOG.info(skipping_stmt)
self.skipTest(skipping_stmt)
def _wait_for_failover_and_test_functionality(self):
"""Wait for failover to end and test Octavia functionality"""
self.loadbalancer_stack.wait_for_update_loadbalancer()
self.loadbalancer_stack.wait_for_active_loadbalancer()
octavia.wait_for_status(object_id=self.lb.id,
status=octavia.PENDING_UPDATE,
timeout=30)
LOG.debug(f'Load Balancer {self.loadbalancer_stack.loadbalancer_id} is'
f' ACTIVE')
octavia.wait_for_status(object_id=self.lb.id)
# Wait for Octavia objects' provisioning status to be ACTIVE
self.listener_stack.wait_for_active_members()
LOG.debug(f'Load Balancer {self.lb.id} is ACTIVE')
# For 5 minutes we ignore specific exceptions as we know
# that Octavia resources are being reprovisioned (amphora during a
# failover)
for attempt in tobiko.retry(timeout=300.):
try:
octavia.check_members_balanced(
pool_id=self.listener_stack.pool_id,
ip_address=self.loadbalancer_stack.floating_ip_address,
lb_algorithm=self.listener_stack.lb_algorithm,
protocol=self.listener_stack.lb_protocol,
port=self.listener_stack.lb_port)
break
except octavia.RoundRobinException:
LOG.exception(f"Traffic didn't reach all members after "
f"#{attempt.number} attempts and "
f"{attempt.elapsed_time} seconds")
if attempt.is_last:
raise
self._send_http_traffic()

View File

@ -49,11 +49,13 @@ class OctaviaServicesFaultTest(testtools.TestCase):
Then we test that traffic which is being sent from the client to the LB
is received as expected.
"""
loadbalancer_stack = tobiko.required_fixture(
stacks.AmphoraIPv4LoadBalancerStack)
listener_stack = tobiko.required_fixture(
stacks.HttpRoundRobinAmphoraIpv4Listener)
lb = None
listener = None
pool = None
server_stack = tobiko.required_fixture(
stacks.UbuntuServerStackFixture)
other_server_stack = tobiko.required_fixture(
stacks.OctaviaOtherServerStackFixture)
# ssh clients of the participating TripleO nodes
ssh_clients: typing.List[ssh.SSHClientFixture] = list()
@ -79,34 +81,26 @@ class OctaviaServicesFaultTest(testtools.TestCase):
"service is necessary."
self.skipTest(skip_reason)
# Wait for Octavia objects to be active
LOG.info('Waiting for member '
f'{self.listener_stack.server_stack.stack_name} and '
f'for member '
f'{self.listener_stack.other_server_stack.stack_name} '
f'to be created...')
self.listener_stack.wait_for_active_members()
self.lb, self.listener, self.pool = octavia.deploy_ipv4_amphora_lb(
servers_stacks=[self.server_stack, self.other_server_stack]
)
self.loadbalancer_stack.wait_for_octavia_service()
self._send_http_traffic()
self.listener_stack.wait_for_members_to_be_reachable()
# For 5 minutes we ignore specific exceptions as we know
# that Octavia resources are being provisioned
# Sending initial traffic before we stop octavia services
for attempt in tobiko.retry(timeout=300.):
def _send_http_traffic(self):
# For 30 seconds we ignore the OctaviaClientException as we know
# that Octavia services are being stopped and restarted
for attempt in tobiko.retry(timeout=30.):
try:
octavia.check_members_balanced(
pool_id=self.listener_stack.pool_id,
ip_address=self.loadbalancer_stack.floating_ip_address,
lb_algorithm=self.listener_stack.lb_algorithm,
protocol=self.listener_stack.lb_protocol,
port=self.listener_stack.lb_port)
pool_id=self.pool.id,
ip_address=self.lb.vip_address,
lb_algorithm=self.pool.lb_algorithm,
protocol=self.listener.protocol,
port=self.listener.protocol_port)
break
except (octavia.RoundRobinException,
octavia.TrafficTimeoutError,
sh.ShellCommandFailed):
LOG.exception(f"Traffic didn't reach all members after "
except octavia.OctaviaClientException:
LOG.exception(f"Octavia service was unavailable after "
f"#{attempt.number} attempts and "
f"{attempt.elapsed_time} seconds")
if attempt.is_last:
@ -167,7 +161,7 @@ class OctaviaServicesFaultTest(testtools.TestCase):
if service.unit in octavia.OCTAVIA_SERVICES:
services_on_nodes[service.unit].append(ssh_client)
# Example of the curret services_on_nodes:
# Example of the current services_on_nodes:
# {
# 'tripleo_octavia_worker.service': [ssh_client_of_controller-0,
# ssh_client_of_controller-1],
@ -204,25 +198,9 @@ class OctaviaServicesFaultTest(testtools.TestCase):
sh.stop_systemd_units(service, ssh_client=ssh_client)
LOG.debug(f'We stopped {service} on {ssh_client.host}')
self.loadbalancer_stack.wait_for_octavia_service()
octavia.wait_for_octavia_service()
# For 30 seconds we ignore the OctaviaClientException as we know
# that Octavia services are being stopped and restarted
for attempt in tobiko.retry(timeout=30.):
try:
octavia.check_members_balanced(
pool_id=self.listener_stack.pool_id,
ip_address=self.loadbalancer_stack.floating_ip_address,
lb_algorithm=self.listener_stack.lb_algorithm,
protocol=self.listener_stack.lb_protocol,
port=self.listener_stack.lb_port)
break
except octavia.OctaviaClientException:
LOG.exception(f"Octavia service was unavailable after "
f"#{attempt.number} attempts and "
f"{attempt.elapsed_time} seconds")
if attempt.is_last:
raise
self._send_http_traffic()
def _start_octavia_main_services(self, services_to_stop: dict):
@ -240,9 +218,4 @@ class OctaviaServicesFaultTest(testtools.TestCase):
LOG.debug(f'We started {service} on {ssh_client.host}')
octavia.check_members_balanced(
pool_id=self.listener_stack.pool_id,
ip_address=self.loadbalancer_stack.floating_ip_address,
lb_algorithm=self.listener_stack.lb_algorithm,
protocol=self.listener_stack.lb_protocol,
port=self.listener_stack.lb_port)
self._send_http_traffic()

View File

@ -36,35 +36,29 @@ class OctaviaBasicTrafficScenarioTest(testtools.TestCase):
Create a client that is connected to the load balancer VIP port,
Generate network traffic from the client to the load balanacer.
"""
loadbalancer_stack = tobiko.required_fixture(
stacks.AmphoraIPv4LoadBalancerStack)
listener_stack = tobiko.required_fixture(
stacks.HttpRoundRobinAmphoraIpv4Listener)
lb = None
listener = None
pool = None
server_stack = tobiko.required_fixture(
stacks.UbuntuServerStackFixture)
other_server_stack = tobiko.required_fixture(
stacks.OctaviaOtherServerStackFixture)
def setUp(self):
# pylint: disable=no-member
super(OctaviaBasicTrafficScenarioTest, self).setUp()
# Wait for Octavia objects to be active
LOG.info('Waiting for member '
f'{self.listener_stack.server_stack.stack_name} and '
f'for member '
f'{self.listener_stack.other_server_stack.stack_name} '
f'to be created...')
self.listener_stack.wait_for_active_members()
self.loadbalancer_stack.wait_for_octavia_service()
self.listener_stack.wait_for_members_to_be_reachable()
self.lb, self.listener, self.pool = octavia.deploy_ipv4_amphora_lb(
servers_stacks=[self.server_stack, self.other_server_stack]
)
def test_round_robin_traffic(self):
_test_traffic(
pool_id=self.listener_stack.pool_id,
ip_address=self.loadbalancer_stack.floating_ip_address,
lb_algorithm=self.listener_stack.lb_algorithm,
protocol=self.listener_stack.lb_protocol,
port=self.listener_stack.lb_port)
pool_id=self.pool.id,
ip_address=self.lb.vip_address,
lb_algorithm=self.pool.lb_algorithm,
protocol=self.listener.protocol,
port=self.listener.protocol_port)
def _test_traffic(pool_id: str, ip_address: str, lb_algorithm: str,
@ -107,30 +101,28 @@ class OctaviaOVNProviderTrafficTest(testtools.TestCase):
Create a client that is connected to the load balancer VIP port via FIP,
Generate TCP network traffic from the client to the load balancer FIP.
"""
loadbalancer_stack = tobiko.required_fixture(
stacks.OVNIPv4LoadBalancerStack)
listener_stack = tobiko.required_fixture(
stacks.TcpSourceIpPortOvnIpv4Listener)
lb = None
listener = None
pool = None
server_stack = tobiko.required_fixture(
stacks.UbuntuServerStackFixture)
other_server_stack = tobiko.required_fixture(
stacks.OctaviaOtherServerStackFixture)
def setUp(self):
# pylint: disable=no-member
super(OctaviaOVNProviderTrafficTest, self).setUp()
# Wait for Octavia objects to be active
LOG.info(f'Waiting for member {self.listener_stack.member_id} and '
f'for member {self.listener_stack.other_member_id} '
f'to be created...')
self.listener_stack.wait_for_active_members()
self.loadbalancer_stack.wait_for_octavia_service()
self.lb, self.listener, self.pool = octavia.deploy_ipv4_ovn_lb(
servers_stacks=[self.server_stack, self.other_server_stack]
)
def test_source_ip_port_traffic(self):
"""Send traffic to the load balancer FIP to test source ip port
"""
_test_traffic(
pool_id=self.listener_stack.pool_id,
ip_address=self.loadbalancer_stack.floating_ip_address,
lb_algorithm=self.listener_stack.lb_algorithm,
protocol=self.listener_stack.lb_protocol,
port=self.listener_stack.lb_port)
pool_id=self.pool.id,
ip_address=self.lb.vip_address,
lb_algorithm=self.pool.lb_algorithm,
protocol=self.listener.protocol,
port=self.listener.protocol_port)