Remove tests requiring Neutron LBaaS

Neutron LBaaS was retired during the Train cycle and was replaced by
Octavia. Because we no longer support Train, it no longer makes sense
to maintain tests that require the removed component.

Change-Id: I9cf7532d51843698b63bae2ee825f683f07e2df8
Takashi Kajinami 2023-09-22 15:01:50 +09:00
parent 26f59bd29a
commit 5047e210dc
7 changed files with 0 additions and 769 deletions
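
For readers migrating away from the removed templates: the OS::Neutron::LBaaS::* resource types deleted below map onto the Octavia-backed Heat resource types. The following is a minimal sketch of the equivalent topology, assuming a pre-existing subnet parameter; the template version and property values are illustrative and are not part of this change.

heat_template_version: pike
parameters:
  subnet:
    type: string
resources:
  # Octavia-backed equivalents of the removed OS::Neutron::LBaaS::* resources
  loadbalancer:
    type: OS::Octavia::LoadBalancer
    properties:
      vip_subnet: { get_param: subnet }
  listener:
    type: OS::Octavia::Listener
    properties:
      loadbalancer: { get_resource: loadbalancer }
      protocol: HTTP
      protocol_port: 80
  pool:
    type: OS::Octavia::Pool
    properties:
      listener: { get_resource: listener }
      lb_algorithm: ROUND_ROBIN
      protocol: HTTP
  healthmonitor:
    type: OS::Octavia::HealthMonitor
    properties:
      pool: { get_resource: pool }
      type: HTTP
      delay: 3
      timeout: 3
      max_retries: 3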

@@ -1,159 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from heat_tempest_plugin.tests.functional import functional_base
class LoadBalancerv2Test(functional_base.FunctionalTestsBase):
create_template = '''
heat_template_version: 2016-04-08
parameters:
subnet:
type: string
resources:
loadbalancer:
type: OS::Neutron::LBaaS::LoadBalancer
properties:
description: aLoadBalancer
vip_subnet: { get_param: subnet }
listener:
type: OS::Neutron::LBaaS::Listener
properties:
description: aListener
loadbalancer: { get_resource: loadbalancer }
protocol: HTTP
protocol_port: 80
connection_limit: 5555
pool:
type: OS::Neutron::LBaaS::Pool
properties:
description: aPool
lb_algorithm: ROUND_ROBIN
protocol: HTTP
listener: { get_resource: listener }
poolmember:
type: OS::Neutron::LBaaS::PoolMember
properties:
address: 1.1.1.1
pool: { get_resource: pool }
protocol_port: 1111
subnet: { get_param: subnet }
weight: 255
# pm2
healthmonitor:
type: OS::Neutron::LBaaS::HealthMonitor
properties:
delay: 3
type: HTTP
timeout: 3
max_retries: 3
pool: { get_resource: pool }
outputs:
loadbalancer:
value: { get_attr: [ loadbalancer, show ] }
pool:
value: { get_attr: [ pool, show ] }
poolmember:
value: { get_attr: [ poolmember, show ] }
listener:
value: { get_attr: [ listener, show ] }
healthmonitor:
value: { get_attr: [ healthmonitor, show ] }
'''
add_member = '''
poolmember2:
type: OS::Neutron::LBaaS::PoolMember
properties:
address: 2.2.2.2
pool: { get_resource: pool }
protocol_port: 2222
subnet: { get_param: subnet }
weight: 222
'''
def setUp(self):
super(LoadBalancerv2Test, self).setUp()
if not self.is_network_extension_supported('lbaasv2'):
self.skipTest('LBaasv2 extension not available, skipping')
@decorators.idempotent_id('2f4a476c-cba7-448b-a7c1-85f7284f0293')
def test_create_update_loadbalancer(self):
parameters = {
'subnet': self.conf.fixed_subnet_name,
}
stack_identifier = self.stack_create(template=self.create_template,
parameters=parameters)
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, 'loadbalancer')
self.assertEqual('ONLINE', output['operating_status'])
template = self.create_template.replace('ROUND_ROBIN', 'SOURCE_IP')
template = template.replace('3', '6')
template = template.replace('255', '256')
template = template.replace('5555', '7777')
template = template.replace('aLoadBalancer', 'updatedLoadBalancer')
template = template.replace('aPool', 'updatedPool')
template = template.replace('aListener', 'updatedListener')
self.update_stack(stack_identifier, template=template,
parameters=parameters)
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, 'loadbalancer')
self.assertEqual('ONLINE', output['operating_status'])
self.assertEqual('updatedLoadBalancer', output['description'])
output = self._stack_output(stack, 'pool')
self.assertEqual('SOURCE_IP', output['lb_algorithm'])
self.assertEqual('updatedPool', output['description'])
output = self._stack_output(stack, 'poolmember')
self.assertEqual(256, output['weight'])
output = self._stack_output(stack, 'healthmonitor')
self.assertEqual(6, output['delay'])
self.assertEqual(6, output['timeout'])
self.assertEqual(6, output['max_retries'])
output = self._stack_output(stack, 'listener')
self.assertEqual(7777, output['connection_limit'])
self.assertEqual('updatedListener', output['description'])
@decorators.idempotent_id('104f59ae-a3c8-4c12-98e5-a7bc0007878d')
def test_add_delete_poolmember(self):
parameters = {
'subnet': self.conf.fixed_subnet_name,
}
stack_identifier = self.stack_create(template=self.create_template,
parameters=parameters)
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, 'loadbalancer')
self.assertEqual('ONLINE', output['operating_status'])
output = self._stack_output(stack, 'pool')
self.assertEqual(1, len(output['members']))
# add pool member
template = self.create_template.replace('# pm2', self.add_member)
self.update_stack(stack_identifier, template=template,
parameters=parameters)
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, 'loadbalancer')
self.assertEqual('ONLINE', output['operating_status'])
output = self._stack_output(stack, 'pool')
self.assertEqual(2, len(output['members']))
# delete pool member
self.update_stack(stack_identifier, template=self.create_template,
parameters=parameters)
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, 'loadbalancer')
self.assertEqual('ONLINE', output['operating_status'])
output = self._stack_output(stack, 'pool')
self.assertEqual(1, len(output['members']))

@@ -1,76 +0,0 @@
heat_template_version: 2015-10-15
description: |
App server that is a member of Neutron Pool.
parameters:
image:
type: string
flavor:
type: string
net:
type: string
sec_group:
type: string
pool:
type: string
app_port:
type: number
timeout:
type: number
subnet:
type: string
wc_extra_args:
type: string
default: ""
resources:
config:
type: OS::Test::WebAppConfig
properties:
app_port: { get_param: app_port }
wc_curl_cli:
list_join:
- " "
- [ get_attr: [ handle, curl_cli ], get_param: wc_extra_args ]
server:
type: OS::Nova::Server
properties:
image: { get_param: image }
flavor: { get_param: flavor }
networks:
- network: { get_param: net }
security_groups:
- { get_param: sec_group }
user_data_format: RAW
user_data: { get_resource: config }
handle:
type: OS::Heat::WaitConditionHandle
waiter:
type: OS::Heat::WaitCondition
depends_on: server
properties:
timeout: { get_param: timeout }
handle: { get_resource: handle }
pool_member:
type: OS::Neutron::LBaaS::PoolMember
depends_on: waiter
properties:
address: { get_attr: [ server, networks, { get_param: net }, 0 ] }
pool: { get_param: pool }
protocol_port: { get_param: app_port }
subnet: { get_param: subnet }

@@ -1,69 +0,0 @@
heat_template_version: pike
parameters:
image:
type: string
flavor:
type: string
network:
type: string
sec_group:
type: string
pool:
type: string
app_port:
type: number
timeout:
type: number
default: 120
subnet:
type: string
resources:
server:
type: OS::Nova::Server
properties:
image: {get_param: image}
flavor: {get_param: flavor}
networks:
- network: {get_param: network}
security_groups:
- {get_param: sec_group}
user_data_format: RAW
user_data:
str_replace:
template: |
#! /bin/sh -v
Body=$(hostname)
Response="HTTP/1.1 200 OK\r\nContent-Length: ${#Body}\r\n\r\n$Body"
while true; do
wc_notify --data-binary '{"status": "SUCCESS"}'
if [ $? -eq 0 ]; then
break
fi
sleep 10
done
while true ; do
echo -e $Response | nc -llp PORT
done
params:
PORT: {get_param: app_port}
wc_notify: { get_attr: [handle, curl_cli]}
handle:
type: OS::Heat::WaitConditionHandle
waiter:
type: OS::Heat::WaitCondition
depends_on: server
properties:
timeout: {get_param: timeout}
handle: {get_resource: handle}
pool_member:
type: OS::Octavia::PoolMember
depends_on: waiter
properties:
address: {get_attr: [server, networks, {get_param: network}, 0]}
pool: {get_param: pool}
protocol_port: {get_param: app_port}
subnet: {get_param: subnet}

@@ -1,117 +0,0 @@
heat_template_version: 2015-04-30
description: |
Template which tests Neutron load balancing requests to members of
Heat AutoScalingGroup.
Instances must be running some webserver on a given app_port
producing HTTP response that is different between servers
but stable over time for given server.
parameters:
flavor:
type: string
image:
type: string
net:
type: string
subnet:
type: string
public_net:
type: string
app_port:
type: number
default: 8080
lb_port:
type: number
default: 80
timeout:
type: number
default: 600
wc_extra_args:
type: string
default: ""
resources:
sec_group:
type: OS::Neutron::SecurityGroup
properties:
rules:
- remote_ip_prefix: 0.0.0.0/0
protocol: tcp
port_range_min: { get_param: app_port }
port_range_max: { get_param: app_port }
asg:
type: OS::Heat::AutoScalingGroup
properties:
desired_capacity: 1
max_size: 2
min_size: 1
resource:
type: OS::Test::NeutronAppServer
properties:
image: { get_param: image }
flavor: { get_param: flavor }
net: { get_param: net}
sec_group: { get_resource: sec_group }
app_port: { get_param: app_port }
pool_id: { get_resource: pool }
timeout: { get_param: timeout }
wc_extra_args: { get_param: wc_extra_args }
scale_up:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: { get_resource: asg }
scaling_adjustment: 1
scale_down:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: { get_resource: asg }
scaling_adjustment: -1
health_monitor:
type: OS::Neutron::HealthMonitor
properties:
delay: 3
type: HTTP
timeout: 3
max_retries: 3
pool:
type: OS::Neutron::Pool
properties:
lb_method: ROUND_ROBIN
protocol: HTTP
subnet: { get_param: subnet }
monitors:
- { get_resource: health_monitor }
vip:
protocol_port: { get_param: lb_port }
floating_ip:
type: OS::Neutron::FloatingIP
properties:
floating_network: { get_param: public_net }
port_id:
{ get_attr: [pool, vip, 'port_id'] }
loadbalancer:
type: OS::Neutron::LoadBalancer
properties:
pool_id: { get_resource: pool }
protocol_port: { get_param: app_port }
outputs:
lburl:
description: URL of the loadbalanced app
value:
str_replace:
template: http://IP_ADDRESS:PORT
params:
IP_ADDRESS: { get_attr: [ floating_ip, floating_ip_address ] }
PORT: { get_param: lb_port }

@@ -1,120 +0,0 @@
heat_template_version: 2015-04-30
description: |
Template which tests Neutron load balancing requests to members of
Heat AutoScalingGroup. This uses LBaas V2.
Instances must be running some webserver on a given app_port
producing HTTP response that is different between servers
but stable over time for given server.
parameters:
flavor:
type: string
image:
type: string
net:
type: string
subnet:
type: string
public_net:
type: string
app_port:
type: number
default: 8080
lb_port:
type: number
default: 80
timeout:
type: number
default: 600
wc_extra_args:
type: string
default: ""
resources:
sec_group:
type: OS::Neutron::SecurityGroup
properties:
rules:
- remote_ip_prefix: 0.0.0.0/0
protocol: tcp
port_range_min: { get_param: app_port }
port_range_max: { get_param: app_port }
asg:
type: OS::Heat::AutoScalingGroup
properties:
desired_capacity: 1
max_size: 2
min_size: 1
resource:
type: OS::Test::NeutronAppServer
properties:
image: { get_param: image }
flavor: { get_param: flavor }
net: { get_param: net}
sec_group: { get_resource: sec_group }
app_port: { get_param: app_port }
pool: { get_resource: pool }
subnet: { get_param: subnet }
timeout: { get_param: timeout }
wc_extra_args: { get_param: wc_extra_args }
scale_up:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: { get_resource: asg }
scaling_adjustment: 1
scale_down:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: { get_resource: asg }
scaling_adjustment: -1
health_monitor:
type: OS::Neutron::LBaaS::HealthMonitor
properties:
delay: 3
type: HTTP
timeout: 3
max_retries: 3
pool: { get_resource: pool }
pool:
type: OS::Neutron::LBaaS::Pool
properties:
lb_algorithm: ROUND_ROBIN
protocol: HTTP
listener: { get_resource: listener }
listener:
type: OS::Neutron::LBaaS::Listener
properties:
loadbalancer: { get_resource: loadbalancer }
protocol: HTTP
protocol_port: { get_param: lb_port }
loadbalancer:
type: OS::Neutron::LBaaS::LoadBalancer
properties:
vip_subnet: { get_param: subnet }
floating_ip:
type: OS::Neutron::FloatingIP
properties:
floating_network: { get_param: public_net }
port_id: { get_attr: [loadbalancer, vip_port_id] }
outputs:
lburl:
description: URL of the loadbalanced app
value:
str_replace:
template: http://IP_ADDRESS:PORT
params:
IP_ADDRESS: { get_attr: [ floating_ip, floating_ip_address ] }
PORT: { get_param: lb_port }

@@ -1,114 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import requests
from tempest.lib import decorators
from heat_tempest_plugin.common import test
from heat_tempest_plugin.tests.scenario import scenario_base
class AutoscalingLoadBalancerTest(scenario_base.ScenarioTestsBase):
"""The class is responsible for testing ASG + LBv1 scenario.
The very common use case tested is an autoscaling group
of some web application servers behind a loadbalancer.
"""
def setUp(self):
super(AutoscalingLoadBalancerTest, self).setUp()
self.template_name = 'test_autoscaling_lb_neutron.yaml'
self.app_server_template_name = 'app_server_neutron.yaml'
self.webapp_template_name = 'netcat-webapp.yaml'
if not self.is_network_extension_supported('lbaas'):
self.skipTest('LBaas v1 extension not available, skipping')
def check_num_responses(self, url, expected_num, retries=10):
resp = set()
for count in range(retries):
time.sleep(1)
try:
r = requests.get(url, verify=self.verify_cert)
except requests.exceptions.ConnectionError:
# The LB may not be up yet, let's retry
continue
# skip unsuccessful requests
if r.status_code == 200:
resp.add(r.text)
self.assertEqual(expected_num, len(resp))
@decorators.idempotent_id('48ddbc45-cef6-4640-acd6-7efc281833b9')
def test_autoscaling_loadbalancer_neutron(self):
"""Check work of AutoScaing and Neutron LBaaS v1 resource in Heat.
The scenario is the following:
1. Launch a stack with a load balancer and autoscaling group
of one server, wait until stack create is complete.
2. Check that there is only one distinctive response from
loadbalanced IP.
3. Signal the scale_up policy, wait until all resources in
autoscaling group are complete.
4. Check that now there are two distinctive responses from
loadbalanced IP.
"""
parameters = {
'flavor': self.conf.minimal_instance_type,
'image': self.conf.minimal_image_ref,
'net': self.conf.fixed_network_name,
'subnet': self.conf.fixed_subnet_name,
'public_net': self.conf.floating_network_name,
'app_port': 8080,
'lb_port': 80,
'timeout': 600
}
if self.conf.vm_to_heat_api_insecure:
parameters['wc_extra_args'] = '--insecure'
app_server_template = self._load_template(
__file__, self.app_server_template_name, self.sub_dir
)
webapp_template = self._load_template(
__file__, self.webapp_template_name, self.sub_dir
)
files = {'appserver.yaml': app_server_template,
'webapp.yaml': webapp_template}
env = {'resource_registry':
{'OS::Test::NeutronAppServer': 'appserver.yaml',
'OS::Test::WebAppConfig': 'webapp.yaml'}}
# Launch stack
sid = self.launch_stack(
template_name=self.template_name,
parameters=parameters,
files=files,
environment=env
)
stack = self.client.stacks.get(sid)
lb_url = self._stack_output(stack, 'lburl')
# Check number of distinctive responses, must be 1
self.check_num_responses(lb_url, 1)
# Signal the scaling hook
self.client.resources.signal(sid, 'scale_up')
# Wait for AutoScalingGroup update to finish
asg = self.client.resources.get(sid, 'asg')
test.call_until_true(self.conf.build_timeout,
self.conf.build_interval,
self.check_autoscale_complete,
asg.physical_resource_id, 2, sid, 'asg')
# Check number of distinctive responses, must now be 2
self.check_num_responses(lb_url, 2)

@@ -1,114 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import requests
from tempest.lib import decorators
from heat_tempest_plugin.common import test
from heat_tempest_plugin.tests.scenario import scenario_base
class AutoscalingLoadBalancerv2Test(scenario_base.ScenarioTestsBase):
"""The class is responsible for testing ASG + LBv2 scenario.
The very common use case tested is an autoscaling group
of some web application servers behind a loadbalancer.
"""
def setUp(self):
super(AutoscalingLoadBalancerv2Test, self).setUp()
self.template_name = 'test_autoscaling_lbv2_neutron.yaml'
self.app_server_template_name = 'app_server_lbv2_neutron.yaml'
self.webapp_template_name = 'netcat-webapp.yaml'
if not self.is_network_extension_supported('lbaasv2'):
self.skipTest('LBaasv2 extension not available, skipping')
def check_num_responses(self, url, expected_num, retries=20):
resp = set()
for count in range(retries):
time.sleep(2)
try:
r = requests.get(url, verify=self.verify_cert)
except requests.exceptions.ConnectionError:
# The LB may not be up yet, let's retry
continue
# skip unsuccessful requests
if r.status_code == 200:
resp.add(r.text)
if len(resp) == expected_num:
break
self.assertEqual(expected_num, len(resp))
@decorators.idempotent_id('89459930-aa61-4557-989b-3429d3b3b612')
def test_autoscaling_loadbalancer_neutron(self):
"""Check work of AutoScaing and Neutron LBaaS v2 resource in Heat.
The scenario is the following:
1. Launch a stack with a load balancer and autoscaling group
of one server, wait until stack create is complete.
2. Check that there is only one distinctive response from
loadbalanced IP.
3. Signal the scale_up policy, wait until all resources in
autoscaling group are complete.
4. Check that now there are two distinctive responses from
loadbalanced IP.
"""
parameters = {
'flavor': self.conf.minimal_instance_type,
'image': self.conf.minimal_image_ref,
'net': self.conf.fixed_network_name,
'subnet': self.conf.fixed_subnet_name,
'public_net': self.conf.floating_network_name
}
if self.conf.vm_to_heat_api_insecure:
parameters['wc_extra_args'] = '--insecure'
app_server_template = self._load_template(
__file__, self.app_server_template_name, self.sub_dir
)
webapp_template = self._load_template(
__file__, self.webapp_template_name, self.sub_dir
)
files = {'appserver.yaml': app_server_template,
'webapp.yaml': webapp_template}
env = {'resource_registry':
{'OS::Test::NeutronAppServer': 'appserver.yaml',
'OS::Test::WebAppConfig': 'webapp.yaml'}}
# Launch stack
sid = self.launch_stack(
template_name=self.template_name,
parameters=parameters,
files=files,
environment=env
)
stack = self.client.stacks.get(sid)
lb_url = self._stack_output(stack, 'lburl')
# Check number of distinctive responses, must be 1
self.check_num_responses(lb_url, 1)
# Signal the scaling hook
self.client.resources.signal(sid, 'scale_up')
# Wait for AutoScalingGroup update to finish
asg = self.client.resources.get(sid, 'asg')
test.call_until_true(self.conf.build_timeout,
self.conf.build_interval,
self.check_autoscale_complete,
asg.physical_resource_id, 2, sid, 'asg')
# Check number of distinctive responses, must now be 2
self.check_num_responses(lb_url, 2)