Merge "Restructure Octavia stacks"
This commit is contained in:
commit
91b92e684c
@@ -34,8 +34,6 @@ get_master_amphora = _client.get_master_amphora
 
 # Waiters
 wait_for_status = _waiters.wait_for_status
-wait_for_members_to_be_reachable = _waiters.wait_for_members_to_be_reachable
-wait_for_octavia_service = _waiters.wait_for_octavia_service
 
 # Validators
 check_members_balanced = _validators.check_members_balanced
@@ -19,7 +19,6 @@ from oslo_log import log
 import tobiko
 from tobiko.openstack import octavia
 from tobiko import config
-from tobiko.shell import sh
 
 LOG = log.getLogger(__name__)
 
@@ -67,56 +66,3 @@ def wait_for_status(status_key, status, get_client, object_id,
 
     LOG.debug(f"Waiting for {get_client.__name__} {status_key} to get "
               f"from '{response[status_key]}' to '{status}'...")
-
-
-def wait_for_members_to_be_reachable(members,
-                                     lb_protocol: str,
-                                     lb_port: int,
-                                     interval: tobiko.Seconds = None,
-                                     timeout: tobiko.Seconds = None,
-                                     count: int = 10):
-
-    # Wait for members to be reachable from localhost
-    last_reached_id = 0
-    for attempt in tobiko.retry(timeout=timeout,
-                                count=count,
-                                interval=interval):
-        try:
-            for member in members[last_reached_id:]:
-                octavia.check_members_balanced(
-                    members_count=1,
-                    ip_address=member.server_stack.ip_address,
-                    protocol=lb_protocol,
-                    port=lb_port,
-                    requests_count=1)
-                last_reached_id += 1  # prevent retrying same member again
-        except sh.ShellCommandFailed:
-            LOG.info("Waiting for members to have HTTP service available...")
-        else:
-            break
-
-        if attempt.is_last:
-            break
-    else:
-        raise RuntimeError("Members couldn't be reached!")
-
-
-def wait_for_octavia_service(loadbalancer_id: str,
-                             interval: tobiko.Seconds = None,
-                             timeout: tobiko.Seconds = None,
-                             client=None):
-    for attempt in tobiko.retry(timeout=timeout,
-                                interval=interval,
-                                default_timeout=180.,
-                                default_interval=5.):
-        try:
-            octavia.list_amphorae(loadbalancer_id=loadbalancer_id,
-                                  client=client)
-        except octavia.OctaviaClientException as ex:
-            LOG.debug(f"Error listing amphorae: {ex}")
-            if attempt.is_last:
-                raise
-            LOG.info('Waiting for the LB to become functional again...')
-        else:
-            LOG.info('Octavia service is available!')
-            break
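With the two waiters above moved into the stack fixtures, wait_for_status is the only generic waiter this module still exports. A minimal usage sketch (hedged: the 'provisioning_status'/'ACTIVE' values and the octavia.get_loadbalancer getter are assumptions based on the Octavia API, not shown in this diff):

    # Minimal sketch, assuming octavia.get_loadbalancer exists as the getter
    # and that load balancers report provisioning_status == 'ACTIVE':
    octavia.wait_for_status(status_key='provisioning_status',
                            status='ACTIVE',
                            get_client=octavia.get_loadbalancer,
                            object_id=loadbalancer_id)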
@@ -81,24 +81,25 @@ AffinityServerGroupStackFixture = _nova.AffinityServerGroupStackFixture
 AntiAffinityServerGroupStackFixture = _nova.AntiAffinityServerGroupStackFixture
 CloudInitServerStackFixture = _nova.CloudInitServerStackFixture
 
-OctaviaLoadbalancerStackFixture = _octavia.OctaviaLoadbalancerStackFixture
-OctaviaListenerStackFixture = _octavia.OctaviaListenerStackFixture
-OctaviaPoolStackFixture = _octavia.OctaviaPoolStackFixture
-OctaviaMemberServerStackFixture = _octavia.OctaviaMemberServerStackFixture
-OctaviaOtherServerStackFixture = _octavia.OctaviaOtherServerStackFixture
-OctaviaOtherMemberServerStackFixture = (
-    _octavia.OctaviaOtherMemberServerStackFixture)
-# Ovn Provider resources
-OctaviaOvnProviderLoadbalancerStackFixture = (
-    _octavia.OctaviaOvnProviderLoadbalancerStackFixture)
-OctaviaOvnProviderListenerStackFixture = (
-    _octavia.OctaviaOvnProviderListenerStackFixture)
-OctaviaOvnProviderPoolStackFixture = (
-    _octavia.OctaviaOvnProviderPoolStackFixture)
-OctaviaOvnProviderMemberServerStackFixture = (
-    _octavia.OctaviaOvnProviderMemberServerStackFixture)
-OctaviaOvnProviderOtherMemberServerStackFixture = (
-    _octavia.OctaviaOvnProviderOtherMemberServerStackFixture)
+# Octavia resources: Amphora provider resources
+AmphoraIPv4LoadBalancerStack = _octavia.AmphoraIPv4LoadBalancerStack
+AmphoraIPv6LoadBalancerStack = _octavia.AmphoraIPv6LoadBalancerStack
+
+HttpRoundRobinAmphoraIpv4Listener = _octavia.HttpRoundRobinAmphoraIpv4Listener
+HttpRoundRobinAmphoraIpv6Listener = _octavia.HttpRoundRobinAmphoraIpv6Listener
+HttpLeastConnectionAmphoraIpv4Listener = (
+    _octavia.HttpLeastConnectionAmphoraIpv4Listener)
+HttpLeastConnectionAmphoraIpv6Listener = (
+    _octavia.HttpLeastConnectionAmphoraIpv6Listener)
+HttpSourceIpAmphoraIpv4Listener = _octavia.HttpSourceIpAmphoraIpv4Listener
+HttpSourceIpAmphoraIpv6Listener = _octavia.HttpSourceIpAmphoraIpv6Listener
+
+# Octavia resources: Ovn provider resources
+OVNIPv4LoadBalancerStack = _octavia.OVNIPv4LoadBalancerStack
+OVNIPv6LoadBalancerStack = _octavia.OVNIPv6LoadBalancerStack
+
+TcpSourceIpPortOvnIpv4Listener = _octavia.TcpSourceIpPortOvnIpv4Listener
+TcpSourceIpPortOvnIpv6Listener = _octavia.TcpSourceIpPortOvnIpv6Listener
 
 QosNetworkStackFixture = _qos.QosNetworkStackFixture
 QosPolicyStackFixture = _qos.QosPolicyStackFixture
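For context, the renamed exports above are consumed by tests as class-level fixtures; a minimal sketch mirroring the test changes later in this diff (the test class name here is hypothetical):

    import testtools

    import tobiko
    from tobiko.openstack import stacks


    class ExampleAmphoraTest(testtools.TestCase):
        # One load balancer stack and one listener stack now describe the
        # whole topology (listener, pool, health monitor and both members).
        loadbalancer_stack = tobiko.required_setup_fixture(
            stacks.AmphoraIPv4LoadBalancerStack)
        listener_stack = tobiko.required_setup_fixture(
            stacks.HttpRoundRobinAmphoraIpv4Listener)

        def setUp(self):
            super(ExampleAmphoraTest, self).setUp()
            # Same waiter calls the updated tests below rely on
            self.listener_stack.wait_for_active_members()
            self.loadbalancer_stack.wait_for_octavia_service()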
@@ -15,6 +15,8 @@
 # under the License.
 from __future__ import absolute_import
 
+from oslo_log import log
+
 import tobiko
 from tobiko import config
 from tobiko.openstack import heat
@@ -22,11 +24,13 @@ from tobiko.openstack import octavia
 from tobiko.openstack.stacks import _hot
 from tobiko.openstack.stacks import _neutron
 from tobiko.openstack.stacks import _ubuntu
+from tobiko.shell import sh
 
 CONF = config.CONF
+LOG = log.getLogger(__name__)
 
 
-class OctaviaLoadbalancerStackFixture(heat.HeatStackFixture):
+class AmphoraIPv4LoadBalancerStack(heat.HeatStackFixture):
     template = _hot.heat_template_file('octavia/load_balancer.yaml')
 
     vip_network = tobiko.required_setup_fixture(_neutron.NetworkStackFixture)
@@ -67,12 +71,40 @@ class OctaviaLoadbalancerStackFixture(heat.HeatStackFixture):
                             object_id=self.loadbalancer_id,
                             timeout=timeout)
 
+    def wait_for_octavia_service(self,
+                                 interval: tobiko.Seconds = None,
+                                 timeout: tobiko.Seconds = None,
+                                 client=None):
+        for attempt in tobiko.retry(timeout=timeout,
+                                    interval=interval,
+                                    default_timeout=180.,
+                                    default_interval=5.):
+            try:
+                octavia.list_amphorae(loadbalancer_id=self.loadbalancer_id,
+                                      client=client)
+            except octavia.OctaviaClientException as ex:
+                LOG.debug(f"Error listing amphorae: {ex}")
+                if attempt.is_last:
+                    raise
+                LOG.info('Waiting for the LB to become functional again...')
+            else:
+                LOG.info('Octavia service is available!')
+                break
+
 
-class OctaviaListenerStackFixture(heat.HeatStackFixture):
+class AmphoraIPv6LoadBalancerStack(AmphoraIPv4LoadBalancerStack):
+    ip_version = 6
+
+
+class OctaviaOtherServerStackFixture(_ubuntu.UbuntuServerStackFixture):
+    pass
+
+
+class HttpRoundRobinAmphoraIpv4Listener(heat.HeatStackFixture):
     template = _hot.heat_template_file('octavia/listener.yaml')
 
     loadbalancer = tobiko.required_setup_fixture(
-        OctaviaLoadbalancerStackFixture)
+        AmphoraIPv4LoadBalancerStack)
 
     lb_port = 80
 
@@ -86,20 +118,14 @@ class OctaviaListenerStackFixture(heat.HeatStackFixture):
     def loadbalancer_provider(self):
         return self.loadbalancer.provider
 
-
-class OctaviaPoolStackFixture(heat.HeatStackFixture):
-    template = _hot.heat_template_file('octavia/pool.yaml')
-
-    listener = tobiko.required_setup_fixture(
-        OctaviaListenerStackFixture)
-
+    # Pool attributes
     pool_protocol = 'HTTP'
 
     lb_algorithm = 'ROUND_ROBIN'
 
+    # healthmonitor attributes
     hm_type = 'HTTP'
 
-    # healthmonitor attributes
     hm_delay = 3
 
     hm_max_retries = 4
@@ -114,9 +140,8 @@ class OctaviaPoolStackFixture(heat.HeatStackFixture):
         return self.listener.listener_id
 
     def wait_for_active_members(self):
-        pool_id = self.stack.output_show('pool_id')['output']['output_value']
-        for member in octavia.list_members(pool_id=pool_id):
-            self.wait_for_active_member(pool_id=pool_id,
+        for member in octavia.list_members(pool_id=self.pool_id):
+            self.wait_for_active_member(pool_id=self.pool_id,
                                         member_id=member['id'])
 
     def wait_for_active_member(self, pool_id, member_id, **kwargs):
@@ -133,14 +158,48 @@ class OctaviaPoolStackFixture(heat.HeatStackFixture):
                              object_id=pool_id,
                              member_id=member_id, **kwargs)
 
-
-class OctaviaMemberServerStackFixture(heat.HeatStackFixture):
-    template = _hot.heat_template_file('octavia/member.yaml')
+    def wait_for_members_to_be_reachable(self,
+                                         interval: tobiko.Seconds = None,
+                                         timeout: tobiko.Seconds = None):
 
-    pool = tobiko.required_fixture(OctaviaPoolStackFixture)
+        members = [self.server_stack, self.other_server_stack]
+
+        if len(members) < 1:
+            return
+
+        # Wait for members to be reachable from localhost
+        last_reached_id = 0
+        for attempt in tobiko.retry(
+                timeout=timeout,
+                interval=interval,
+                default_interval=5.,
+                default_timeout=150.):
+            try:
+                for member in members[last_reached_id:]:
+                    octavia.check_members_balanced(
+                        members_count=1,
+                        ip_address=member.ip_address,
+                        protocol=self.lb_protocol,
+                        port=self.lb_port,
+                        requests_count=1)
+                    last_reached_id += 1  # prevent retrying same member again
+            except sh.ShellCommandFailed:
+                if attempt.is_last:
+                    raise
+                LOG.info(
+                    "Waiting for members to have HTTP service available...")
+                continue
+            else:
+                break
+        else:
+            raise RuntimeError("Members couldn't be reached!")
 
+    # Members attributes
     server_stack = tobiko.required_fixture(_ubuntu.UbuntuServerStackFixture)
 
+    other_server_stack = tobiko.required_setup_fixture(
+        OctaviaOtherServerStackFixture)
+
     application_port = 80
 
     ip_version = 4
@@ -151,63 +210,65 @@ class OctaviaMemberServerStackFixture(heat.HeatStackFixture):
 
     @property
     def subnet_id(self):
+        network_stack = self.server_stack.network_stack
         if self.ip_version == 4:
-            return self.server_stack.network_stack.ipv4_subnet_id
+            return network_stack.ipv4_subnet_id
         else:
-            return self.server_stack.network_stack.ipv6_subnet_id
+            return network_stack.ipv6_subnet_id
 
     @property
     def member_address(self) -> str:
-        return str(self.server_stack.find_fixed_ip(ip_version=self.ip_version))
+        return self.get_member_address(self.server_stack)
+
+    @property
+    def other_member_address(self) -> str:
+        return self.get_member_address(self.other_server_stack)
+
+    def get_member_address(self, server_stack):
+        return str(server_stack.find_fixed_ip(ip_version=self.ip_version))
 
 
-class OctaviaOtherServerStackFixture(_ubuntu.UbuntuServerStackFixture):
-    pass
+class HttpRoundRobinAmphoraIpv6Listener(HttpRoundRobinAmphoraIpv4Listener):
+    ip_version = 6
 
 
-class OctaviaOtherMemberServerStackFixture(
-        OctaviaMemberServerStackFixture):
-    server_stack = tobiko.required_setup_fixture(
-        OctaviaOtherServerStackFixture)
+class HttpLeastConnectionAmphoraIpv4Listener(
+        HttpRoundRobinAmphoraIpv4Listener):
+    lb_algorithm = 'LEAST_CONNECTIONS'
 
 
-# OVN provider stack fixtures
-class OctaviaOvnProviderLoadbalancerStackFixture(
-        OctaviaLoadbalancerStackFixture):
-
-    provider = 'ovn'
+class HttpLeastConnectionAmphoraIpv6Listener(
+        HttpLeastConnectionAmphoraIpv4Listener):
+    ip_version = 6
 
 
-class OctaviaOvnProviderListenerStackFixture(OctaviaListenerStackFixture):
+class HttpSourceIpAmphoraIpv4Listener(HttpRoundRobinAmphoraIpv4Listener):
+    lb_algorithm = 'SOURCE_IP'
 
-    loadbalancer = tobiko.required_setup_fixture(
-        OctaviaOvnProviderLoadbalancerStackFixture)
 
-    lb_port = 22
+class HttpSourceIpAmphoraIpv6Listener(HttpSourceIpAmphoraIpv4Listener):
+    ip_version = 6
 
-    lb_protocol = 'TCP'
 
+# OVN provider stack fixtures
+class OVNIPv4LoadBalancerStack(AmphoraIPv4LoadBalancerStack):
+    provider = 'ovn'
 
-class OctaviaOvnProviderPoolStackFixture(OctaviaPoolStackFixture):
-    listener = tobiko.required_setup_fixture(
-        OctaviaOvnProviderListenerStackFixture)
 
-    pool_protocol = 'TCP'
+class OVNIPv6LoadBalancerStack(OVNIPv4LoadBalancerStack):
+    ip_version = 6
 
-    lb_algorithm = 'SOURCE_IP_PORT'
 
-    #: There isn't any health monitor available for OVN provider
+class TcpSourceIpPortOvnIpv4Listener(HttpRoundRobinAmphoraIpv4Listener):
+    loadbalancer = tobiko.required_setup_fixture(OVNIPv4LoadBalancerStack)
+    lb_protocol = 'TCP'
+    lb_port = 22
+    has_monitor = False
 
-
-class OctaviaOvnProviderMemberServerStackFixture(
-        OctaviaMemberServerStackFixture):
-    pool = tobiko.required_setup_fixture(OctaviaOvnProviderPoolStackFixture)
+    lb_algorithm = 'SOURCE_IP_PORT'
+    pool_protocol = 'TCP'
+    application_port = 22
 
 
-class OctaviaOvnProviderOtherMemberServerStackFixture(
-        OctaviaOvnProviderMemberServerStackFixture):
-    server_stack = tobiko.required_setup_fixture(
-        OctaviaOtherServerStackFixture)
+class TcpSourceIpPortOvnIpv6Listener(TcpSourceIpPortOvnIpv4Listener):
+    ip_version = 6
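Under this restructure a new scenario flavor is just another subclass that overrides class attributes, as the Ipv6/LeastConnection/SourceIp variants above show; a hypothetical extra flavor would follow the same pattern:

    # Hypothetical example following the subclass pattern above: reuse the
    # whole Amphora HTTP topology and override only the front-end port.
    class HttpRoundRobinAmphoraIpv4AltPortListener(
            HttpRoundRobinAmphoraIpv4Listener):
        lb_port = 8080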
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: newton
 
 description: A Listener for a Load Balancer
 
@@ -17,6 +17,73 @@ parameters:
     type: string
     description: ID of the load balancer
 
+  # Pool attributes
+  lb_algorithm:
+    type: string
+    default: ROUND_ROBIN
+    description: Load balancing algorithm
+
+  pool_protocol:
+    type: string
+    default: HTTP
+    description: Protocol used by the pool members
+
+  # Health monitor parameters
+  has_monitor:
+    type: boolean
+    description: whether to create the health monitor
+    default: true
+
+  hm_delay:
+    type: number
+    default: 10
+    description: >
+      The minimum time in seconds between regular connections of the
+      member.
+
+  hm_max_retries:
+    type: number
+    default: 4
+    description: >
+      Number of permissible connection failures before changing the member
+      status to INACTIVE.
+
+  hm_timeout:
+    type: number
+    default: 3
+    description: >
+      Maximum number of seconds for a monitor to wait for a connection
+      to be established before it times out.
+
+  hm_type:
+    type: string
+    default: HTTP
+    description: Type of health-monitor
+
+  # Member attributes
+  member_address:
+    type: string
+    description: The IP address of the member
+
+  other_member_address:
+    type: string
+    description: The IP address of the other member
+
+  subnet_id:
+    type: string
+    description: the ID of the subnet used by the member
+    constraints:
+    - custom_constraint: neutron.subnet
+
+  application_port:
+    type: number
+    default: 80
+    description: The port number of the member's application
+
+conditions:
+  has_monitor:
+    get_param: has_monitor
+
 resources:
   listener:
     type: OS::Octavia::Listener
@@ -25,7 +92,57 @@ resources:
       protocol: { get_param: lb_protocol }
       protocol_port: { get_param: lb_port }
 
+  pool:
+    type: OS::Octavia::Pool
+    properties:
+      lb_algorithm: { get_param: lb_algorithm }
+      protocol: { get_param: pool_protocol }
+      listener: { get_resource: listener }
+
+  monitor:
+    type: OS::Octavia::HealthMonitor
+    condition: has_monitor
+    properties:
+      delay: { get_param: hm_delay }
+      type: { get_param: hm_type }
+      timeout: { get_param: hm_timeout }
+      max_retries: { get_param: hm_max_retries }
+      pool: { get_resource: pool }
+
+  member:
+    type: OS::Octavia::PoolMember
+    properties:
+      pool: { get_resource: pool }
+      address: { get_param: member_address }
+      subnet: { get_param: subnet_id }
+      protocol_port: { get_param: application_port }
+
+  other_member:
+    type: OS::Octavia::PoolMember
+    properties:
+      pool: { get_resource: pool }
+      address: { get_param: other_member_address }
+      subnet: { get_param: subnet_id }
+      protocol_port: { get_param: application_port }
+
 outputs:
   listener_id:
     description: Listener ID
     value: { get_resource: listener }
+
+  pool_id:
+    description: Pool ID
+    value: { get_resource: pool }
+
+  monitor_id:
+    description: Healthmonitor ID
+    value: { get_resource: monitor }
+    condition: has_monitor
+
+  member_id:
+    description: member ID
+    value: { get_resource: member }
+
+  other_member_id:
+    description: other member ID
+    value: { get_resource: other_member }
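The fixtures read these template outputs through the stack API; a sketch of the access pattern, taken from the output_show() call removed earlier in this diff (the helper name here is hypothetical):

    # Hypothetical helper showing how the 'pool_id' output declared above can
    # be read from a created stack, mirroring the access pattern of the
    # removed wait_for_active_members() implementation.
    def get_pool_id(stack_fixture):
        output = stack_fixture.stack.output_show('pool_id')
        return output['output']['output_value']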
@@ -40,37 +40,32 @@ class OctaviaBasicFaultTest(testtools.TestCase):
     Generate network traffic again to verify Octavia functionality.
     """
     loadbalancer_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaLoadbalancerStackFixture)
+        stacks.AmphoraIPv4LoadBalancerStack)
 
     listener_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaListenerStackFixture)
-
-    pool_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaPoolStackFixture)
-
-    member1_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaMemberServerStackFixture)
-
-    member2_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaOtherMemberServerStackFixture)
+        stacks.HttpRoundRobinAmphoraIpv4Listener)
 
     def setUp(self):
        # pylint: disable=no-member
        super(OctaviaBasicFaultTest, self).setUp()
 
        # Wait for Octavia objects to be active
-       LOG.info(f'Waiting for {self.member1_stack.stack_name} and '
-                f'{self.member2_stack.stack_name} to be created...')
-       self.pool_stack.wait_for_active_members()
+       LOG.info('Waiting for member '
+                f'{self.listener_stack.server_stack.stack_name} and '
+                f'for member '
+                f'{self.listener_stack.other_server_stack.stack_name} '
+                f'to be created...')
+       self.listener_stack.wait_for_active_members()
 
-       octavia.wait_for_octavia_service(
-           loadbalancer_id=self.loadbalancer_stack.loadbalancer_id)
+       self.loadbalancer_stack.wait_for_octavia_service()
+
+       self.listener_stack.wait_for_members_to_be_reachable()
 
        # Send traffic
        octavia.check_members_balanced(
-           pool_id=self.pool_stack.pool_id,
+           pool_id=self.listener_stack.pool_id,
            ip_address=self.loadbalancer_stack.floating_ip_address,
-           lb_algorithm=self.pool_stack.lb_algorithm,
+           lb_algorithm=self.listener_stack.lb_algorithm,
            protocol=self.listener_stack.lb_protocol,
            port=self.listener_stack.lb_port)
 
@@ -104,12 +99,12 @@ class OctaviaBasicFaultTest(testtools.TestCase):
                         f' ACTIVE')
 
        # Wait for Octavia objects' provisioning status to be ACTIVE
-       self.pool_stack.wait_for_active_members()
+       self.listener_stack.wait_for_active_members()
 
        # Verify Octavia functionality
        octavia.check_members_balanced(
-           pool_id=self.pool_stack.pool_id,
+           pool_id=self.listener_stack.pool_id,
            ip_address=self.loadbalancer_stack.floating_ip_address,
-           lb_algorithm=self.pool_stack.lb_algorithm,
+           lb_algorithm=self.listener_stack.lb_algorithm,
            protocol=self.listener_stack.lb_protocol,
            port=self.listener_stack.lb_port)
@@ -50,19 +50,10 @@ class OctaviaServicesFaultTest(testtools.TestCase):
     is received as expected.
     """
     loadbalancer_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaLoadbalancerStackFixture)
+        stacks.AmphoraIPv4LoadBalancerStack)
 
     listener_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaListenerStackFixture)
-
-    pool_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaPoolStackFixture)
-
-    member1_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaMemberServerStackFixture)
-
-    member2_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaOtherMemberServerStackFixture)
+        stacks.HttpRoundRobinAmphoraIpv4Listener)
 
     list_octavia_active_units = ('systemctl list-units ' +
                                  '--state=active tripleo_octavia_*')
@@ -80,18 +71,23 @@ class OctaviaServicesFaultTest(testtools.TestCase):
             skip_reason = "The number of controllers should be 3 for this test"
             self.skipTest(skip_reason)
 
-        LOG.info(f'Waiting for {self.member1_stack.stack_name} and '
-                 f'{self.member2_stack.stack_name} to be created...')
-        self.pool_stack.wait_for_active_members()
+        # Wait for Octavia objects to be active
+        LOG.info('Waiting for member '
+                 f'{self.listener_stack.server_stack.stack_name} and '
+                 f'for member '
+                 f'{self.listener_stack.other_server_stack.stack_name} '
+                 f'to be created...')
+        self.listener_stack.wait_for_active_members()
 
-        octavia.wait_for_octavia_service(
-            loadbalancer_id=self.loadbalancer_stack.loadbalancer_id)
+        self.loadbalancer_stack.wait_for_octavia_service()
+
+        self.listener_stack.wait_for_members_to_be_reachable()
 
         # Sending initial traffic before we stop octavia services
         octavia.check_members_balanced(
-            pool_id=self.pool_stack.pool_id,
+            pool_id=self.listener_stack.pool_id,
             ip_address=self.loadbalancer_stack.floating_ip_address,
-            lb_algorithm=self.pool_stack.lb_algorithm,
+            lb_algorithm=self.listener_stack.lb_algorithm,
             protocol=self.listener_stack.lb_protocol,
             port=self.listener_stack.lb_port)
 
@@ -176,10 +172,12 @@ class OctaviaServicesFaultTest(testtools.TestCase):
             err_msg = f'{service} was not stopped on {controller.name}'
             self.assertTrue(service not in octavia_active_units, err_msg)
 
+        self.loadbalancer_stack.wait_for_octavia_service()
+
         octavia.check_members_balanced(
-            pool_id=self.pool_stack.pool_id,
+            pool_id=self.listener_stack.pool_id,
             ip_address=self.loadbalancer_stack.floating_ip_address,
-            lb_algorithm=self.pool_stack.lb_algorithm,
+            lb_algorithm=self.listener_stack.lb_algorithm,
             protocol=self.listener_stack.lb_protocol,
             port=self.listener_stack.lb_port)
 
@@ -208,8 +206,8 @@ class OctaviaServicesFaultTest(testtools.TestCase):
         self._make_sure_octavia_services_are_active(controller)
 
         octavia.check_members_balanced(
-            pool_id=self.pool_stack.pool_id,
+            pool_id=self.listener_stack.pool_id,
             ip_address=self.loadbalancer_stack.floating_ip_address,
-            lb_algorithm=self.pool_stack.lb_algorithm,
+            lb_algorithm=self.listener_stack.lb_algorithm,
             protocol=self.listener_stack.lb_protocol,
             port=self.listener_stack.lb_port)
@@ -42,37 +42,26 @@ class OctaviaBasicTrafficScenarioTest(testtools.TestCase):
     Generate network traffic from the client to the load balancer.
     """
     loadbalancer_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaLoadbalancerStackFixture)
+        stacks.AmphoraIPv4LoadBalancerStack)
 
     listener_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaListenerStackFixture)
-
-    pool_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaPoolStackFixture)
-
-    member1_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaMemberServerStackFixture)
-
-    member2_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaOtherMemberServerStackFixture)
+        stacks.HttpRoundRobinAmphoraIpv4Listener)
 
     def setUp(self):
         # pylint: disable=no-member
         super(OctaviaBasicTrafficScenarioTest, self).setUp()
 
         # Wait for Octavia objects to be active
-        LOG.info(f'Waiting for {self.member1_stack.stack_name} and '
-                 f'{self.member2_stack.stack_name} to be created...')
-        self.pool_stack.wait_for_active_members()
+        LOG.info('Waiting for member '
+                 f'{self.listener_stack.server_stack.stack_name} and '
+                 f'for member '
+                 f'{self.listener_stack.other_server_stack.stack_name} '
+                 f'to be created...')
+        self.listener_stack.wait_for_active_members()
 
-        octavia.wait_for_octavia_service(
-            loadbalancer_id=self.loadbalancer_stack.loadbalancer_id)
+        self.loadbalancer_stack.wait_for_octavia_service()
 
-        octavia.wait_for_members_to_be_reachable(
-            members=[self.member1_stack, self.member2_stack],
-            lb_protocol=self.listener_stack.lb_protocol,
-            lb_port=self.listener_stack.lb_port
-        )
+        self.listener_stack.wait_for_members_to_be_reachable()
 
     def test_round_robin_traffic(self):
         # For 5 minutes we ignore specific exceptions as we know
@@ -80,9 +69,9 @@ class OctaviaBasicTrafficScenarioTest(testtools.TestCase):
         for attempt in tobiko.retry(timeout=300.):
             try:
                 octavia.check_members_balanced(
-                    pool_id=self.pool_stack.pool_id,
+                    pool_id=self.listener_stack.pool_id,
                     ip_address=self.loadbalancer_stack.floating_ip_address,
-                    lb_algorithm=self.pool_stack.lb_algorithm,
+                    lb_algorithm=self.listener_stack.lb_algorithm,
                     protocol=self.listener_stack.lb_protocol,
                     port=self.listener_stack.lb_port)
                 break
@@ -107,31 +96,22 @@ class OctaviaOVNProviderTrafficTest(testtools.TestCase):
     Generate network traffic from the client to the load balancer via ssh.
     """
     loadbalancer_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaOvnProviderLoadbalancerStackFixture)
+        stacks.OVNIPv4LoadBalancerStack)
 
     listener_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaOvnProviderListenerStackFixture)
-
-    pool_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaOvnProviderPoolStackFixture)
-
-    member1_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaOvnProviderMemberServerStackFixture)
-
-    member2_stack = tobiko.required_setup_fixture(
-        stacks.OctaviaOvnProviderOtherMemberServerStackFixture)
+        stacks.TcpSourceIpPortOvnIpv4Listener)
 
     def setUp(self):
         # pylint: disable=no-member
         super(OctaviaOVNProviderTrafficTest, self).setUp()
 
         # Wait for Octavia objects to be active
-        LOG.info(f'Waiting for {self.member1_stack.stack_name} and '
-                 f'{self.member2_stack.stack_name} to be created...')
-        self.pool_stack.wait_for_active_members()
+        LOG.info(f'Waiting for member {self.listener_stack.member_id} and '
+                 f'for member {self.listener_stack.other_member_id} '
+                 f'to be created...')
+        self.listener_stack.wait_for_active_members()
 
-        octavia.wait_for_octavia_service(
-            loadbalancer_id=self.loadbalancer_stack.loadbalancer_id)
+        self.loadbalancer_stack.wait_for_octavia_service()
 
     def test_ssh_traffic(self):
         """SSH every member server to get its hostname using a load balancer
@@ -139,22 +119,23 @@ class OctaviaOVNProviderTrafficTest(testtools.TestCase):
         username: typing.Optional[str] = None
         password: typing.Optional[str] = None
         missing_replies = set()
-        for member_stack in [self.member1_stack, self.member2_stack]:
-            ssh_client = member_stack.server_stack.ssh_client
+        for member_server in [self.listener_stack.server_stack,
+                              self.listener_stack.other_server_stack]:
+            ssh_client = member_server.ssh_client
             hostname = sh.get_hostname(ssh_client=ssh_client)
             missing_replies.add(hostname)
             if username is None:
-                username = member_stack.server_stack.username
+                username = member_server.username
             else:
                 self.assertEqual(username,
-                                 member_stack.server_stack.username,
+                                 member_server.username,
                                  "Not all member servers have the same "
                                  "username to login with")
             if password is None:
-                password = member_stack.server_stack.password
+                password = member_server.password
             else:
-                self.assertEqual(password,
-                                 member_stack.server_stack.password,
+                self.assertEqual(password, member_server.password,
                                  "Not all member servers have the same "
                                  "password to login with")
 
@@ -182,8 +163,7 @@ class OctaviaOVNProviderTrafficTest(testtools.TestCase):
                 LOG.debug('Reached member server(s):\n'
                           f'{pretty_replies(replies)}')
                 if attempt.is_last:
-                    self.fail('Unreached member server(s): '
-                              f'{missing_replies}')
+                    self.fail(f'Unreached member server(s): {missing_replies}')
                 else:
                     LOG.debug('Waiting for reaching remaining server(s)... '
                               f'{missing_replies}')