Merge "Add Octavia's main services manipulation test"
commit 019a30867a
@@ -26,9 +26,16 @@ octavia_client = _client.octavia_client
OctaviaClientFixture = _client.OctaviaClientFixture
get_loadbalancer = _client.get_loadbalancer
get_member = _client.get_member
list_amphorae = _client.list_amphorae
get_amphora_vm = _client.get_amphora_vm
list_amphoras_compute_nodes = _client.list_amphoras_compute_nodes
get_amphoras_compute_nodes = _client.get_amphoras_compute_nodes

# Waiters
wait_for_status = _waiters.wait_for_status
wait_for_active_members_and_lb = _waiters.wait_for_active_members_and_lb
wait_for_lb_to_be_updated_and_active = (
    _waiters.wait_for_lb_to_be_updated_and_active)

# Validators
check_members_balanced = _validators.check_members_balanced
@@ -41,3 +48,16 @@ TimeoutException = _exceptions.TimeoutException
PROVISIONING_STATUS = _constants.PROVISIONING_STATUS
ACTIVE = _constants.ACTIVE
ERROR = _constants.ERROR
PENDING_UPDATE = _constants.PENDING_UPDATE
WORKER_SERVICE = _constants.WORKER_SERVICE
HOUSEKEEPING_SERVICE = _constants.HOUSEKEEPING_SERVICE
HM_SERVICE = _constants.HM_SERVICE
API_SERVICE = _constants.API_SERVICE
WORKER_CONTAINER = _constants.WORKER_CONTAINER
HOUSEKEEPING_CONTAINER = _constants.HOUSEKEEPING_CONTAINER
HM_CONTAINER = _constants.HM_CONTAINER
API_CONTAINER = _constants.API_CONTAINER
OCTAVIA_SERVICES = [WORKER_SERVICE, HOUSEKEEPING_SERVICE, HM_SERVICE,
                    API_SERVICE]
OCTAVIA_CONTAINERS = [WORKER_CONTAINER, HOUSEKEEPING_CONTAINER, HM_CONTAINER,
                      API_CONTAINER]
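As a hedged sketch (not part of the diff; `lb_id` is a hypothetical placeholder), test code is expected to consume all of the above through this package facade rather than the private `_client`, `_waiters` and `_validators` modules:

from tobiko.openstack import octavia

# lb_id is a hypothetical load balancer UUID used only for illustration
lb = octavia.get_loadbalancer(loadbalancer_id=lb_id)
assert lb[octavia.PROVISIONING_STATUS] == octavia.ACTIVE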
@@ -13,11 +13,15 @@
# under the License.
from __future__ import absolute_import

import typing

from octaviaclient.api.v2 import octavia

import tobiko
from tobiko.openstack import _client
from tobiko.openstack import keystone
from tobiko.openstack import nova
from tobiko.openstack import topology


OCTAVIA_CLIENT_CLASSSES = octavia.OctaviaAPI,
@@ -76,3 +80,57 @@ def get_loadbalancer(loadbalancer_id, client=None):
def get_member(pool_id, member_id, client=None):
    return octavia_client(client).member_show(pool_id=pool_id,
                                              member_id=member_id)


def list_amphorae(loadbalancer_id: str, client=None):
    return octavia_client(client).amphora_list(
        loadbalancer_id=loadbalancer_id)['amphorae']


def list_amphoras_compute_nodes(load_balancer_id: str, client=None):
    """List the compute nodes which host the LB amphorae.

    This function finds the Overcloud compute nodes which
    host the amphorae and returns a list of their hostnames.
    """

    hostnames = set()
    for amphora in list_amphorae(loadbalancer_id=load_balancer_id,
                                 client=client):
        server = nova.get_server(amphora['compute_id'])
        hostnames.add(getattr(server, 'OS-EXT-SRV-ATTR:hypervisor_hostname'))
    return list(hostnames)


def get_amphoras_compute_nodes(load_balancer_id: str, client=None):
    """Get the compute nodes which host the LB amphorae.

    This function finds the Overcloud compute nodes which
    host the amphorae and returns a list of their topology node instances.
    """

    hostnames = list_amphoras_compute_nodes(load_balancer_id=load_balancer_id,
                                            client=client)
    return topology.list_openstack_nodes(hostnames=hostnames)


def get_amphora_vm(
        loadbalancer_id: str, client=None) -> typing.Optional[nova.NovaServer]:

    """Get the LB's amphora virtual machine.

    When the Octavia topology is SINGLE, it returns
    the only amphora's VM.
    When the Octavia topology is ACTIVE_STANDBY,
    it returns the first amphora's VM, which might be
    either the MASTER or the BACKUP one.
    """

    amphora_vm_id = list_amphorae(loadbalancer_id, client)[0]['compute_id']

    err_msg = ("Could not find amphora_vm_id for any amphora in "
               f"LB {loadbalancer_id}")
    tobiko_test_case = tobiko.get_test_case()
    tobiko_test_case.assertTrue(amphora_vm_id, err_msg)

    return nova.get_server(server_id=amphora_vm_id)
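A minimal usage sketch of the new helpers (illustrative only; `lb_id` is a hypothetical load balancer UUID):

from tobiko.openstack import octavia

amphorae = octavia.list_amphorae(loadbalancer_id=lb_id)    # raw amphora records
hostnames = octavia.list_amphoras_compute_nodes(load_balancer_id=lb_id)
nodes = octavia.get_amphoras_compute_nodes(load_balancer_id=lb_id)  # topology nodes
vm = octavia.get_amphora_vm(loadbalancer_id=lb_id)         # NovaServer, or None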
@@ -19,3 +19,16 @@ PROVISIONING_STATUS = 'provisioning_status'
# Octavia provisioning status
ACTIVE = 'ACTIVE'
ERROR = 'ERROR'
PENDING_UPDATE = 'PENDING_UPDATE'

# Octavia services
WORKER_SERVICE = 'tripleo_octavia_worker.service'
HOUSEKEEPING_SERVICE = 'tripleo_octavia_housekeeping.service'
HM_SERVICE = 'tripleo_octavia_health_manager.service'
API_SERVICE = 'tripleo_octavia_api.service'

# Octavia containers
WORKER_CONTAINER = 'octavia_worker'
HOUSEKEEPING_CONTAINER = 'octavia_housekeeping'
HM_CONTAINER = 'octavia_health_manager'
API_CONTAINER = 'octavia_api'
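The unit names pair one-to-one with the container names above, since TripleO runs each `octavia_*` container under a `tripleo_octavia_*.service` systemd unit. A small sketch of using the constants together, reusing the same `systemctl list-units` check the new test performs (the helper name is hypothetical):

from tobiko.openstack import octavia
from tobiko.shell import sh


def all_octavia_units_active(ssh_client) -> bool:
    # List the active tripleo_octavia_* units on the node and make sure
    # every known Octavia service name shows up in the output
    output = sh.execute('systemctl list-units --state=active tripleo_octavia_*',
                        ssh_client=ssh_client, sudo=True).stdout
    return all(service in output for service in octavia.OCTAVIA_SERVICES)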
@@ -66,3 +66,29 @@ def wait_for_status(status_key, status, get_client, object_id,

        LOG.debug(f"Waiting for {get_client.__name__} {status_key} to go "
                  f"from '{response[status_key]}' to '{status}'...")


def wait_for_active_members_and_lb(members, pool_id, loadbalancer_id):
    for member_id in members:
        octavia.wait_for_status(status_key=octavia.PROVISIONING_STATUS,
                                status=octavia.ACTIVE,
                                get_client=octavia.get_member,
                                object_id=pool_id, member_id=member_id)

    # Wait for the LB to be provisioned and ACTIVE
    octavia.wait_for_status(status_key=octavia.PROVISIONING_STATUS,
                            status=octavia.ACTIVE,
                            get_client=octavia.get_loadbalancer,
                            object_id=loadbalancer_id)


def wait_for_lb_to_be_updated_and_active(loadbalancer_id):
    octavia.wait_for_status(status_key=octavia.PROVISIONING_STATUS,
                            status=octavia.PENDING_UPDATE,
                            get_client=octavia.get_loadbalancer,
                            object_id=loadbalancer_id)

    octavia.wait_for_status(status_key=octavia.PROVISIONING_STATUS,
                            status=octavia.ACTIVE,
                            get_client=octavia.get_loadbalancer,
                            object_id=loadbalancer_id)
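A hedged sketch of the intended call pattern for these waiters (the IDs are hypothetical placeholders): wait_for_active_members_and_lb is meant for freshly created members, while wait_for_lb_to_be_updated_and_active follows an update request through the PENDING_UPDATE -> ACTIVE transition:

from tobiko.openstack import octavia

# after creating pool members:
octavia.wait_for_active_members_and_lb(members=[member_id],
                                       pool_id=pool_id,
                                       loadbalancer_id=lb_id)

# after sending an update to the load balancer (e.g. changing its pool):
octavia.wait_for_lb_to_be_updated_and_active(loadbalancer_id=lb_id)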
0 tobiko/tests/faults/octavia/__init__.py Normal file
211 tobiko/tests/faults/octavia/test_services.py Normal file
@@ -0,0 +1,211 @@
# Copyright (c) 2021 Red Hat
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import

import typing

import testtools
from oslo_log import log

import tobiko
from tobiko.openstack import keystone
from tobiko.openstack import octavia
from tobiko.openstack import stacks
from tobiko.openstack import topology
from tobiko.openstack.topology import OpenStackTopologyNode
from tobiko.shell import sh
from tobiko.shell.ssh import SSHClientFixture


LOG = log.getLogger(__name__)

@keystone.skip_if_missing_service(name='octavia')
class OctaviaServicesFaultTest(testtools.TestCase):
    """Octavia services fault test.

    Create a load balancer with two members that run a server application,
    create a client that is connected to the load balancer VIP port, and
    generate network traffic from the client to the load balancer while
    stopping some of the Octavia services - only when the container runtime
    environment is podman.

    Each service is left running as a single instance.
    E.g. only one WORKER_SERVICE keeps running across all controllers,
    and the same holds for API_SERVICE etc.

    Then we test that traffic which is being sent from the client to the LB
    is received as expected.
    """
    loadbalancer_stack = tobiko.required_setup_fixture(
        stacks.OctaviaLoadbalancerStackFixture)

    listener_stack = tobiko.required_setup_fixture(
        stacks.OctaviaListenerStackFixture)

    pool_stack = tobiko.required_setup_fixture(
        stacks.OctaviaPoolStackFixture)

    member1_stack = tobiko.required_setup_fixture(
        stacks.OctaviaMemberServerStackFixture)

    member2_stack = tobiko.required_setup_fixture(
        stacks.OctaviaOtherMemberServerStackFixture)

    client_stack = tobiko.required_setup_fixture(
        stacks.OctaviaClientServerStackFixture)

    members_count = 2

    list_octavia_active_units = ('systemctl list-units '
                                 '--state=active tripleo_octavia_*')
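    # Illustrative target layout (it matches excluded_services in
    # test_services_fault below): after the stops, each service keeps
    # exactly one running instance across the three controllers:
    #   controller-0 -> API_SERVICE
    #   controller-1 -> WORKER_SERVICE
    #   controller-2 -> HM_SERVICE and HOUSEKEEPING_SERVICE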
    def setUp(self):
        # pylint: disable=no-member
        super(OctaviaServicesFaultTest, self).setUp()

        # Wait for the Octavia objects to become active
        octavia.wait_for_active_members_and_lb(
            members=[self.member1_stack.member_id,
                     self.member2_stack.member_id],
            pool_id=self.pool_stack.pool_id,
            loadbalancer_id=self.loadbalancer_stack.loadbalancer_id)

        # Send initial traffic before stopping the Octavia services
        octavia.check_members_balanced(
            self.pool_stack, self.client_stack, self.members_count,
            self.loadbalancer_stack.loadbalancer_vip,
            self.listener_stack.lb_protocol, self.listener_stack.lb_port)
    def test_services_fault(self):
        controllers = topology.list_openstack_nodes(group='controller')

        if len(controllers) != 3:
            skip_reason = "The number of controllers should be 3 for this test"
            self.skipTest(skip_reason)

        # excluded_services maps each controller to the services which will
        # be kept running (i.e. NOT stopped) on it, so that every service
        # keeps exactly one running instance in the cluster
        excluded_services = {
            "controller-0": [octavia.API_SERVICE],
            "controller-1": [octavia.WORKER_SERVICE],
            "controller-2": [octavia.HM_SERVICE, octavia.HOUSEKEEPING_SERVICE]
        }

        try:
            for controller in controllers:
                self._make_sure_octavia_services_are_active(controller)

                self._stop_octavia_main_services(
                    controller, excluded_services[controller.name])

        finally:
            self._start_octavia_main_services(controllers)
    def _make_sure_octavia_services_are_active(
            self, controller: OpenStackTopologyNode):

        actual_services = self._list_octavia_services(controller.ssh_client)
        for service in octavia.OCTAVIA_SERVICES:
            err_msg = (f'{service} is inactive on {controller.name}. '
                       'It should have been active')
            self.assertIn(service, actual_services, err_msg)
        LOG.debug("All Octavia services are running")
    def _list_octavia_services(self, ssh_client: SSHClientFixture) -> str:
        """Return the Octavia services status.

        This method returns the output of the command we run to list the
        active Octavia units.
        """

        octavia_services = sh.execute(self.list_octavia_active_units,
                                      ssh_client=ssh_client, sudo=True).stdout
        octavia_services_output = f'Octavia units are:\n{octavia_services}'
        LOG.debug(octavia_services_output)
        return octavia_services
    def _stop_octavia_main_services(self, controller: OpenStackTopologyNode,
                                    excluded_services: typing.List[str]):

        """Stop the main Octavia services on a controller.

        This method stops all of the Octavia services on the given
        controller, except for the ones listed in excluded_services.
        After it runs the stop command (`systemctl stop`),
        it makes sure that the stopped services no longer appear among
        the active Octavia units.

        It then sends traffic to validate Octavia's functionality.
        """

        # Prepare the list of services to stop
        services_to_stop = octavia.OCTAVIA_SERVICES

        if excluded_services:
            services_to_stop = [service for service in services_to_stop if (
                service not in excluded_services)]

        # Stop the Octavia services
        for service in services_to_stop:
            command = f"systemctl stop {service}"

            sh.execute(command, ssh_client=controller.ssh_client, sudo=True)

            LOG.info(f"Stopped {service} on {controller.name}")

        # Make sure the Octavia services were stopped
        octavia_active_units = self._list_octavia_services(
            controller.ssh_client)

        for service in services_to_stop:
            err_msg = f'{service} was not stopped on {controller.name}'
            self.assertNotIn(service, octavia_active_units, err_msg)

        octavia.check_members_balanced(
            self.pool_stack, self.client_stack, self.members_count,
            self.loadbalancer_stack.loadbalancer_vip,
            self.listener_stack.lb_protocol, self.listener_stack.lb_port)
    def _start_octavia_main_services(
            self,
            controllers: typing.Optional[
                typing.List[OpenStackTopologyNode]] = None):

        """Start the Octavia services on the given controllers.

        This method starts the Octavia services on each controller.
        After it runs the start command (`systemctl start`), it makes
        sure that all of the Octavia services appear among the active
        Octavia units.

        It then sends traffic to validate Octavia's functionality.
        """

        controllers = controllers or topology.list_openstack_nodes(
            group='controller')
        for controller in controllers:

            # Start the Octavia services
            for service in octavia.OCTAVIA_SERVICES:
                sh.execute(f"systemctl start {service}",
                           ssh_client=controller.ssh_client, sudo=True)

            # Make sure the Octavia services were started
            self._make_sure_octavia_services_are_active(controller)

        octavia.check_members_balanced(
            self.pool_stack, self.client_stack, self.members_count,
            self.loadbalancer_stack.loadbalancer_vip,
            self.listener_stack.lb_protocol, self.listener_stack.lb_port)