Refactor Octavia waiters module

This patch refactors the Octavia waiters module by splitting the
waiter functions so that each has stronger cohesion.

Change-Id: Ic75539cf6f1a54d65cfb87ea312321d03a931a0f
changes/65/814565/13
Omer 1 year ago
parent bdaa011323
commit c5b78df864
  1. 4
      tobiko/openstack/octavia/__init__.py
  2. 42
      tobiko/openstack/octavia/_waiters.py
  3. 39
      tobiko/openstack/stacks/_octavia.py
  4. 33
      tobiko/tests/faults/octavia/test_faults.py
  5. 9
      tobiko/tests/faults/octavia/test_services.py
  6. 17
      tobiko/tests/scenario/octavia/test_traffic.py

@ -35,10 +35,6 @@ get_amphoras_compute_nodes = _client.get_amphoras_compute_nodes
# Waiters
wait_for_status = _waiters.wait_for_status
wait_for_members_to_be_reachable = _waiters.wait_for_members_to_be_reachable
wait_for_active_and_functional_members_and_lb = (
_waiters.wait_for_active_and_functional_members_and_lb)
wait_for_lb_to_be_updated_and_active = (
_waiters.wait_for_lb_to_be_updated_and_active)
wait_for_octavia_service = _waiters.wait_for_octavia_service
# Validators

@ -101,48 +101,6 @@ def wait_for_members_to_be_reachable(members,
raise RuntimeError("Members couldn't be reached!")
def wait_for_active_and_functional_members_and_lb(
        members,
        pool_id: str,
        lb_protocol: str,
        lb_port: int,
        loadbalancer_id: str,
        interval: tobiko.Seconds = None,
        timeout: tobiko.Seconds = None):
    """Wait until the members and the LB are ACTIVE and members respond.

    First waits for every member to reach an ACTIVE provisioning
    status, then waits for the load balancer itself to become ACTIVE,
    and finally verifies the members are reachable over the given
    protocol/port.

    :param members: iterable of member stack objects (each exposing
        ``member_id``).
    :param pool_id: id of the pool the members belong to.
    :param lb_protocol: protocol used to reach the members (e.g. HTTP).
    :param lb_port: port used to reach the members.
    :param loadbalancer_id: id of the load balancer to wait for.
    :param interval: polling interval in seconds (None for default).
    :param timeout: overall timeout in seconds (None for default).
    """
    # Wait for members to have an ACTIVE provisioning status
    for member_stack in members:
        octavia.wait_for_status(status_key=octavia.PROVISIONING_STATUS,
                                status=octavia.ACTIVE,
                                get_client=octavia.get_member,
                                object_id=pool_id,
                                member_id=member_stack.member_id)

    # Wait for LB to have an ACTIVE provisioning status
    octavia.wait_for_status(status_key=octavia.PROVISIONING_STATUS,
                            status=octavia.ACTIVE,
                            get_client=octavia.get_loadbalancer,
                            object_id=loadbalancer_id)

    wait_for_members_to_be_reachable(members=members,
                                     lb_protocol=lb_protocol,
                                     lb_port=lb_port,
                                     timeout=timeout,
                                     interval=interval)
def wait_for_lb_to_be_updated_and_active(loadbalancer_id):
    """Wait for the LB to pass through PENDING_UPDATE and reach ACTIVE.

    Waits first for the load balancer's provisioning status to become
    PENDING_UPDATE (the update has started), then for it to return to
    ACTIVE (the update has finished).

    :param loadbalancer_id: id of the load balancer to wait for.
    """
    octavia.wait_for_status(status_key=octavia.PROVISIONING_STATUS,
                            status=octavia.PENDING_UPDATE,
                            get_client=octavia.get_loadbalancer,
                            object_id=loadbalancer_id)

    octavia.wait_for_status(status_key=octavia.PROVISIONING_STATUS,
                            status=octavia.ACTIVE,
                            get_client=octavia.get_loadbalancer,
                            object_id=loadbalancer_id)
def wait_for_octavia_service(loadbalancer_id: str,
interval: tobiko.Seconds = None,
timeout: tobiko.Seconds = None,

@ -15,19 +15,16 @@
# under the License.
from __future__ import absolute_import
from oslo_log import log
import tobiko
from tobiko import config
from tobiko.openstack import heat
from tobiko.openstack import octavia
from tobiko.openstack.stacks import _centos
from tobiko.openstack.stacks import _cirros
from tobiko.openstack.stacks import _hot
from tobiko.openstack.stacks import _neutron
CONF = config.CONF
LOG = log.getLogger(__name__)
class OctaviaVipNetworkStackFixture(_neutron.NetworkStackFixture):
@ -108,6 +105,20 @@ class OctaviaLoadbalancerStackFixture(heat.HeatStackFixture):
else:
return self.vip_network.ipv6_subnet_id
def wait_for_active_loadbalancer(self, **kwargs):
    """Wait for this fixture's LB to reach an ACTIVE provisioning status.

    :param kwargs: extra keyword arguments forwarded to
        ``octavia.wait_for_status`` (e.g. ``timeout``, ``interval``).
    """
    octavia.wait_for_status(status_key=octavia.PROVISIONING_STATUS,
                            status=octavia.ACTIVE,
                            get_client=octavia.get_loadbalancer,
                            object_id=self.loadbalancer.loadbalancer_id,
                            **kwargs)
def wait_for_update_loadbalancer(self, **kwargs):
    """Wait for this fixture's LB to reach PENDING_UPDATE status.

    :param kwargs: extra keyword arguments forwarded to
        ``octavia.wait_for_status`` (e.g. ``timeout``, ``interval``).
    """
    octavia.wait_for_status(status_key=octavia.PROVISIONING_STATUS,
                            status=octavia.PENDING_UPDATE,
                            get_client=octavia.get_loadbalancer,
                            object_id=self.loadbalancer.loadbalancer_id,
                            **kwargs)
class OctaviaListenerStackFixture(heat.HeatStackFixture):
template = _hot.heat_template_file('octavia/listener.yaml')
@ -147,6 +158,26 @@ class OctaviaPoolStackFixture(heat.HeatStackFixture):
def listener_id(self):
return self.listener.listener_id
def wait_for_active_members(self):
    """Wait for every member of this pool to become ACTIVE.

    Reads the pool id from the Heat stack output and waits for each
    member listed by Octavia to reach an ACTIVE provisioning status.
    """
    pool_id = self.stack.output_show('pool_id')['output']['output_value']
    for member in octavia.list_members(pool_id=pool_id):
        self.wait_for_active_member(pool_id=pool_id,
                                    member_id=member['id'])
def wait_for_active_member(self, pool_id, member_id, **kwargs):
    """Wait for the member to be active.

    Waits for the member to have an ACTIVE provisioning status.

    :param pool_id: the pool id.
    :param member_id: the member id.
    :param kwargs: extra keyword arguments forwarded to
        ``octavia.wait_for_status`` (e.g. ``timeout``, ``interval``).
    """
    octavia.wait_for_status(status_key=octavia.PROVISIONING_STATUS,
                            status=octavia.ACTIVE,
                            get_client=octavia.get_member,
                            object_id=pool_id,
                            member_id=member_id, **kwargs)
class OctaviaMemberServerStackFixture(heat.HeatStackFixture):
template = _hot.heat_template_file('octavia/member.yaml')

@ -65,11 +65,9 @@ class OctaviaBasicFaultTest(testtools.TestCase):
' implemented yet')
# Wait for Octavia objects to be active
octavia.wait_for_active_members_and_lb(
members=[self.member1_stack.member_id,
self.member2_stack.member_id],
pool_id=self.pool_stack.pool_id,
loadbalancer_id=self.loadbalancer_stack.loadbalancer_id)
LOG.info(f'Waiting for {self.member1_stack.stack_name} and '
f'{self.member2_stack.stack_name} to be created...')
self.pool_stack.wait_for_active_members()
octavia.wait_for_octavia_service(
loadbalancer_id=self.loadbalancer_stack.loadbalancer_id)
@ -93,21 +91,26 @@ class OctaviaBasicFaultTest(testtools.TestCase):
LOG.debug('Compute node has been rebooted')
# Wait for LB to be updated and active
octavia.wait_for_lb_to_be_updated_and_active(
self.loadbalancer_stack.loadbalancer_id)
# Wait for the LB to be updated
try:
self.loadbalancer_stack.wait_for_update_loadbalancer(
loadbalancer_id=self.loadbalancer_stack.loadbalancer_id,
timeout=30)
except tobiko.RetryTimeLimitError:
LOG.info('The restarted servers reached ACTIVE status after the'
' LB finished its update process, hence no exception is'
' being raised even though the update timeout was'
' reached.')
self.loadbalancer_stack.wait_for_active_loadbalancer(
loadbalancer_id=self.loadbalancer_stack.loadbalancer_id)
LOG.debug(f'Load Balancer {self.loadbalancer_stack.loadbalancer_id} is'
f' ACTIVE')
# Wait for Octavia objects' provisioning status to be ACTIVE
octavia.wait_for_active_and_functional_members_and_lb(
members=[self.member1_stack,
self.member2_stack],
pool_id=self.pool_stack.pool_id,
lb_protocol=self.listener_stack.lb_protocol,
lb_port=self.listener_stack.lb_port,
loadbalancer_id=self.loadbalancer_stack.loadbalancer_id)
self.pool_stack.wait_for_active_members()
# Verify Octavia functionality
octavia.check_members_balanced(

@ -80,12 +80,9 @@ class OctaviaServicesFaultTest(testtools.TestCase):
skip_reason = "The number of controllers should be 3 for this test"
self.skipTest(skip_reason)
# Wait for Octavia objects to be active
octavia.wait_for_active_members_and_lb(
members=[self.member1_stack.member_id,
self.member2_stack.member_id],
pool_id=self.pool_stack.pool_id,
loadbalancer_id=self.loadbalancer_stack.loadbalancer_id)
LOG.info(f'Waiting for {self.member1_stack.stack_name} and '
f'{self.member2_stack.stack_name} to be created...')
self.pool_stack.wait_for_active_members()
octavia.wait_for_octavia_service(
loadbalancer_id=self.loadbalancer_stack.loadbalancer_id)

@ -16,6 +16,7 @@ from __future__ import absolute_import
import pytest
import testtools
from oslo_log import log
import tobiko
from tobiko.openstack import keystone
@ -23,6 +24,9 @@ from tobiko.openstack import octavia
from tobiko.openstack import stacks
LOG = log.getLogger(__name__)
@keystone.skip_if_missing_service(name='octavia')
class OctaviaBasicTrafficScenarioTest(testtools.TestCase):
"""Octavia traffic scenario test.
@ -50,15 +54,10 @@ class OctaviaBasicTrafficScenarioTest(testtools.TestCase):
# pylint: disable=no-member
super(OctaviaBasicTrafficScenarioTest, self).setUp()
# Wait for Octavia objects' provisioning status to be ACTIVE
# and reachable
octavia.wait_for_active_and_functional_members_and_lb(
members=[self.member1_stack,
self.member2_stack],
pool_id=self.pool_stack.pool_id,
lb_protocol=self.listener_stack.lb_protocol,
lb_port=self.listener_stack.lb_port,
loadbalancer_id=self.loadbalancer_stack.loadbalancer_id)
# Wait for Octavia objects to be active
LOG.info(f'Waiting for {self.member1_stack.stack_name} and '
f'{self.member2_stack.stack_name} to be created...')
self.pool_stack.wait_for_active_members()
octavia.wait_for_octavia_service(
loadbalancer_id=self.loadbalancer_stack.loadbalancer_id)

Loading…
Cancel
Save