Separate listener and pool stacks

Until now, the listener stack and the pool stack lived in the
same class (Listener), which created unnecessary coupling between
them, even though they are separate entities.

This patch separates the listener stack from the pool stack by
moving the pool part to a new file:
        tobiko/openstack/stacks/octavia/pool.yaml

Change-Id: Ia5e3cca6b543e5907cc875d2e7dbb4f7424d9c9e
This commit is contained in:
oschwart 2021-02-21 17:33:21 +02:00 committed by Federico Ressi
parent 9944076232
commit b9868d44ca
7 changed files with 67 additions and 42 deletions

View File

@ -83,6 +83,7 @@ UbuntuExternalServerStackFixture = _ubuntu.UbuntuExternalServerStackFixture
OctaviaLoadbalancerStackFixture = _octavia.OctaviaLoadbalancerStackFixture
OctaviaListenerStackFixture = _octavia.OctaviaListenerStackFixture
OctaviaPoolStackFixture = _octavia.OctaviaPoolStackFixture
OctaviaMemberServerStackFixture = _octavia.OctaviaMemberServerStackFixture
OctaviaServerStackFixture = _octavia.OctaviaServerStackFixture
OctaviaClientServerStackFixture = _octavia.OctaviaClientServerStackFixture

View File

@ -106,23 +106,34 @@ class OctaviaListenerStackFixture(heat.HeatStackFixture):
lb_port = 80
pool_protocol = 'HTTP'
lb_protocol = 'HTTP'
lb_algorithm = 'ROUND_ROBIN'
hm_type = 'HTTP'
@property
def loadbalancer_id(self):
return self.loadbalancer.loadbalancer_id
class OctaviaPoolStackFixture(heat.HeatStackFixture):
template = _hot.heat_template_file('octavia/pool.yaml')
listener = tobiko.required_setup_fixture(
OctaviaListenerStackFixture)
pool_protocol = 'HTTP'
lb_algorithm = 'ROUND_ROBIN'
hm_type = 'HTTP'
@property
def listener_id(self):
return self.listener.listener_id
class OctaviaMemberServerStackFixture(heat.HeatStackFixture):
template = _hot.heat_template_file('octavia/member.yaml')
listener = tobiko.required_setup_fixture(OctaviaListenerStackFixture)
pool = tobiko.required_setup_fixture(OctaviaPoolStackFixture)
server_stack = tobiko.required_setup_fixture(OctaviaServerStackFixture)
@ -132,7 +143,7 @@ class OctaviaMemberServerStackFixture(heat.HeatStackFixture):
@property
def pool_id(self):
return self.listener.pool_id
return self.pool.pool_id
@property
def subnet_id(self):

View File

@ -1,6 +1,6 @@
heat_template_version: 2015-10-15
description: A Listener and a pool for a Load Balancer
description: A Listener for a Load Balancer
parameters:
lb_port:
@ -13,33 +13,11 @@ parameters:
default: HTTP
description: Public protocol exposed by the listener
lb_algorithm:
type: string
default: ROUND_ROBIN
description: Load balancing algorithm
pool_protocol:
type: string
default: HTTP
description: Protocol used by the pool members
hm_type:
type: string
default: HTTP
description: Type of health-monitor
loadbalancer_id:
type: string
description: ID of the load balancer
resources:
pool:
type: OS::Octavia::Pool
properties:
lb_algorithm: { get_param: lb_algorithm }
protocol: { get_param: pool_protocol }
listener: { get_resource: listener }
listener:
type: OS::Octavia::Listener
properties:
@ -51,7 +29,3 @@ outputs:
listener_id:
description: Listener ID
value: { get_resource: listener }
pool_id:
description: Pool ID
value: { get_resource: pool }

View File

@ -0,0 +1,36 @@
heat_template_version: 2015-10-15
description: A pool for a Load Balancer
parameters:
lb_algorithm:
type: string
default: ROUND_ROBIN
description: Load balancing algorithm
pool_protocol:
type: string
default: HTTP
description: Protocol used by the pool members
hm_type:
type: string
default: HTTP
description: Type of health-monitor
listener_id:
type: string
description: ID of the listener
resources:
pool:
type: OS::Octavia::Pool
properties:
lb_algorithm: { get_param: lb_algorithm }
protocol: { get_param: pool_protocol }
listener: { get_param: listener_id }
outputs:
pool_id:
description: Pool ID
value: { get_resource: pool }

View File

@ -34,6 +34,9 @@ class OctaviaBasicTrafficScenarioTest(octavia_base.OctaviaTest):
listener_stack = tobiko.required_setup_fixture(
stacks.OctaviaListenerStackFixture)
pool_stack = tobiko.required_setup_fixture(
stacks.OctaviaPoolStackFixture)
member1_stack = tobiko.required_setup_fixture(
stacks.OctaviaMemberServerStackFixture)
@ -55,10 +58,10 @@ class OctaviaBasicTrafficScenarioTest(octavia_base.OctaviaTest):
# Wait for members
waiters.wait_for_member_functional(self.client_stack,
self.listener_stack,
self.pool_stack,
self.member1_stack, self.request)
waiters.wait_for_member_functional(self.client_stack,
self.listener_stack,
self.pool_stack,
self.member2_stack, self.request)
# Wait for LB is provisioned and ACTIVE
@ -77,7 +80,7 @@ class OctaviaBasicTrafficScenarioTest(octavia_base.OctaviaTest):
return self.loadbalancer_stack
def test_traffic(self):
self.check_members_balanced(self.listener_stack,
self.check_members_balanced(self.pool_stack,
self.client_stack,
self.members_count,
self.loadbalancer_vip,

View File

@ -52,7 +52,7 @@ class Validators(base.TobikoTest):
return ret.stdout
def check_members_balanced(self, listener_stack, client_stack,
def check_members_balanced(self, pool_stack, client_stack,
members_count,
loadbalancer_vip, loadbalancer_protocol,
loadbalancer_port):
@ -80,7 +80,7 @@ class Validators(base.TobikoTest):
'The number of detected active members:{} is not '
'as expected:{}'.format(len(replies), members_count))
if listener_stack.lb_algorithm == 'ROUND_ROBIN':
if pool_stack.lb_algorithm == 'ROUND_ROBIN':
# assert that requests have been fairly dispatched (each server
# received the same number of requests)
self.assertEqual(1, len(set(replies.values())),

View File

@ -124,13 +124,13 @@ def wait_for_loadbalancer_functional(loadbalancer_stack, client_stack,
request_function)
def wait_for_member_functional(client_stack, listener_stack, member_stack,
def wait_for_member_functional(client_stack, pool_stack, member_stack,
request_function):
"""Wait until a member server is functional."""
member_ip = member_stack.server_stack.floating_ip_address
member_port = member_stack.application_port
member_protocol = listener_stack.pool_protocol
member_protocol = pool_stack.pool_protocol
wait_for_request_data(client_stack, member_ip, member_protocol,
member_port, request_function)