# vmware-nsx-tempest-plugin/vmware_nsx_tempest_plugin/tests/nsxv/scenario/test_octavia_loadbalancers.py
#
# 1870 lines, 95 KiB, Python
# Copyright 2019 VMware Inc
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest import test
from vmware_nsx_tempest_plugin.common import constants
from vmware_nsx_tempest_plugin.lib import feature_manager
from vmware_nsx_tempest_plugin.services import nsxv_client
LOG = constants.log.getLogger(__name__)
CONF = config.CONF
class OctaviaRoundRobin(feature_manager.FeatureManager):
"""Base class to support LBaaS ROUND-ROBIN test.
It provides the methods to create loadbalancer network, and
start web servers.
Default lb_algorithm is ROUND_ROBIND.
"""
@classmethod
def setup_clients(cls):
super(OctaviaRoundRobin, cls).setup_clients()
cls.cmgr_adm = cls.get_client_manager('admin')
cls.cmgr_alt = cls.get_client_manager('alt')
cls.cmgr_adm = cls.get_client_manager('admin')
@classmethod
def skip_checks(cls):
super(OctaviaRoundRobin, cls).skip_checks()
cfg = CONF.network
if not test.is_extension_enabled('lbaasv2', 'network'):
msg = 'lbaasv2 extension is not enabled.'
raise cls.skipException(msg)
if not (cfg.project_networks_reachable or cfg.public_network_id):
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(OctaviaRoundRobin, cls).resource_setup()
if CONF.network.backend == "nsxv":
manager_ip = re.search(r"(\d{1,3}\.){3}\d{1,3}",
CONF.nsxv.manager_uri).group(0)
cls.vsm = nsxv_client.VSMClient(
manager_ip, CONF.nsxv.user, CONF.nsxv.password)
    @classmethod
    def setup_credentials(cls):
        # Ask framework to not create network resources for these tests;
        # the deploy_octavia_topology* helpers build their own networks,
        # subnets and routers with the admin client.
        cls.set_network_resources()
        super(OctaviaRoundRobin, cls).setup_credentials()
def setUp(self):
super(OctaviaRoundRobin, self).setUp()
CONF.validation.ssh_shell_prologue = ''
self.vip_ip_address = ''
self.namestart = 'lbaas-ops'
self.poke_counters = 12
self.hm_delay = 4
self.hm_max_retries = 3
self.hm_timeout = 10
self.server_names = []
self.loadbalancer = None
self.vip_fip = None
self.web_service_start_delay = 2.5
def tearDown(self):
if self.vip_fip:
LOG.debug("tearDown lbass vip fip")
self.disassociate_floatingip(self.vip_fip, and_delete=True)
if self.loadbalancer:
LOG.debug("tearDown lbass")
lb_id = self.loadbalancer['id']
lbs = self.octavia_admin_client.\
list_octavia_load_balancers()['loadbalancers']
lb_names = [lb['name'] for lb in lbs]
if self.loadbalancer['name'] in lb_names:
self.delete_octavia_lb_resources(lb_id)
else:
LOG.debug("tearDown skipped as lb already deleted")
LOG.debug("tearDown lbaas exiting...")
super(OctaviaRoundRobin, self).tearDown()
def start_netcat_session(self, client_ip, server_ip, protocol_port=1212):
private_key = self.keypair['private_key']
ssh_client1 = self.get_remote_client(server_ip,
private_key=private_key)
vip = self.loadbalancer['vip_address']
cmd = ('nc -l -p %s &' % (protocol_port))
ssh_client1.exec_command(cmd)
ssh_client2 = self.get_remote_client(client_ip,
private_key=private_key)
cmd = ('nc %s %s &' % (vip, protocol_port))
ssh_client2.exec_command(cmd)
    def verify_sessioin_edge(self, vip, router):
        """Assert the edge flowtable has an entry for the given vip.

        SSHes to the router's first external fixed ip and checks the
        'show flowtable' output contains the vip.
        NOTE(review): edge admin credentials are hard-coded here --
        confirm they match the deployed edge appliance.
        """
        # First external fixed ip is the edge's reachable address.
        router_info = \
            router['external_gateway_info']['external_fixed_ips']
        router_ip = \
            router_info[0]['ip_address']
        ssh_client = remote_client.RemoteClient(router_ip, 'admin',
                                                'Admin!23Admin')
        cmd = 'show flowtable topN 20 '
        output = ssh_client.exec_command(cmd)
        self.assertIn(vip, output)
def _assign_floating_ip_to_vip(self):
vip_port = self.loadbalancer['vip_port_id']
sg_id = self.sg['id']
self.cmgr_adm.ports_client.update_port(vip_port,
security_groups=[sg_id])
fip_client = self.cmgr_adm.floating_ips_client
vip_port = self.loadbalancer['vip_port_id']
vip_fip = self.create_floatingip(self.loadbalancer, client=fip_client,
port_id=vip_port)
self.vip_ip_address = vip_fip['floating_ip_address']
def _find_listener(self, vsm_listeners, listener):
port = listener['protocol_port']
if listener['protocol'] == 'TCP':
proto = 'tcp'
if listener['protocol'] == 'HTTP':
proto = 'http'
if listener['protocol'] == 'HTTPS':
proto = 'https'
for l_1 in vsm_listeners:
if (l_1['name'], l_1['protocol'], l_1['port']) == \
('vip_' + listener['id'], proto, str(port)):
return l_1
else:
return False
def _find_application_profile(self, l_1, lbaas_config, sp):
vsm_app_profiles = lbaas_config['applicationProfile']
for app in vsm_app_profiles:
if l_1['applicationProfileId'] == app['applicationProfileId']:
try:
if app['persistence']['method'] == sp:
return app
except Exception:
return False
else:
return False
def _find_pool(self, l1, lbaas_config, pool):
pool_vsm = lbaas_config['pool']
if pool['lb_algorithm'] == 'ROUND_ROBIN':
lb_algo = 'round-robin'
if pool['lb_algorithm'] == 'LEAST_CONNECTIONS':
lb_algo = 'leastconn'
if pool['lb_algorithm'] == 'SOURCE_IP':
lb_algo = 'ip-hash'
for p in pool_vsm:
try:
if l1['defaultPoolId'] == p['poolId'] and \
('pool_' + pool['id'], lb_algo) == \
(p['name'], p['algorithm']):
return p
except Exception:
return False
else:
return False
def _verify_lbaas_on_edge(self, lb_id, listener=None, pool=None,
member=None, hmonitor=None,
session_persistence=None, cleanup=[]):
if 'ALL' in cleanup:
cleanup = ['hm', 'member', 'pool', 'listener']
lbaas_config = self.vsm.get_lbaas_config_from_edge(lb_id)
if lbaas_config:
if hmonitor:
lbaas_hm_config = lbaas_config['monitor']
if hmonitor['type'] == 'PING':
h_type = 'icmp'
if hmonitor['type'] == 'TCP':
h_type = 'tcp'
if hmonitor['type'] == 'HTTP':
h_type = 'http'
hms_vsm = [(hm['name'], hm['type']) for hm in lbaas_hm_config]
if 'hm' in cleanup:
msg = hmonitor['id'] + ' hm isnt deleted in backend'
self.assertFalse((hmonitor['id'], h_type) in hms_vsm)
else:
msg = hmonitor['id'] + ' hm is delete or not present'
self.assertTrue((hmonitor['id'], h_type) in hms_vsm)
if pool:
l1 = self.\
_find_listener(vsm_listeners=lbaas_config['virtualServer'],
listener=listener)
found_pool = self._find_pool(l1, lbaas_config, pool)
if 'pool' in cleanup:
msg = pool['id'] + ' pool isnt deleted in backend'
self.assertFalse(found_pool)
else:
msg = pool['id'] + ' pool isnt with listener or deleted'
self.assertTrue(found_pool, msg)
if listener:
l1 = self.\
_find_listener(vsm_listeners=lbaas_config['virtualServer'],
listener=listener)
if 'listener' in cleanup:
msg = listener['id'] + ' listener isnt deleted in backend'
self.assertFalse(l1)
else:
msg = listener['id'] + ' listener is deleted or not exist'
self.assertTrue(l1)
if session_persistence:
sp = None
if session_persistence == 'SOURCE_IP':
sp = 'sourceip'
app = self._find_application_profile(l1, lbaas_config, sp)
if session_persistence != "None":
msg = 'session persistence value is not in backend'
self.assertTrue(app, msg)
else:
msg = ' session persistence value is not set as None'
self.assertFalse(app)
if member:
pool_name = 'pool_' + pool['id']
for p in lbaas_config['pool']:
if pool_name == p['name']:
members_vsm = \
[(m['name'], m['ipAddress']) for m in p['member']]
address = member['address']
if 'member' in cleanup:
self.assertFalse(('member-' + member['id'],
address) in members_vsm)
else:
self.assertTrue(('member-' + member['id'],
address) in members_vsm)
else:
LOG.debug('lbaas_config is not present,'
'either edge deleted to config is deleted')
    def _update_lb_components(self, lb_id, hm, member, pool, listener):
        """Exercise update operations on every component of one LB.

        Updates the health monitor, a member, the pool and the listener,
        verifying each change on the NSXv edge backend, then points the
        listener back at the original pool and deletes the temporary
        second pool. Each mutation waits for the LB to return to ACTIVE
        before the next API call.
        """
        # Update healthmonitor & verify
        hm_data = {'name': 'new_hm', 'timeout': 20}
        self.healthmonitor = self.octavia_hm_client.\
            update_octavia_hm(hm['id'],
                              hm_data)['healthmonitor']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        if CONF.network.backend == "nsxv":
            self._verify_lbaas_on_edge(lb_id, listener=listener,
                                       pool=pool, hmonitor=self.healthmonitor,
                                       member=member, cleanup=[])
        # Update members & revert oneof member & verify
        member_data = {'name': 'member0_new', 'weight': 100}
        member_id = member['id']
        member = self.octavia_admin_members_client.\
            update_octavia_member(pool['id'], member_id,
                                  member_data)['member']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        if CONF.network.backend == "nsxv":
            self._verify_lbaas_on_edge(lb_id, listener=listener,
                                       pool=pool,
                                       member=member,
                                       cleanup=[])
        # Update pool with name & lb_algo, revert same change & verify
        pool_data = {'name': 'newPool', 'lb_algorithm': 'LEAST_CONNECTIONS',
                     'session_persistence': None}
        self.pool = self.octavia_admin_pools_client.\
            update_octavia_pool(pool['id'], pool_data)['pool']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        if CONF.network.backend == "nsxv":
            # session_persistence="None" asserts the edge profile has no
            # persistence configured after the update cleared it.
            self._verify_lbaas_on_edge(lb_id, listener=listener,
                                       pool=self.pool,
                                       member=member,
                                       session_persistence="None",
                                       cleanup=[])
        # Update listener with another pool & verify
        self.pool1 = self.octavia_admin_pools_client.\
            create_octavia_pool(loadbalancer_id=lb_id,
                                lb_algorithm='ROUND_ROBIN',
                                protocol='TCP',
                                name='pool2',
                                session_persistence=None)['pool']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        listener_data = {'name': 'new_listner',
                         'default_pool_id': self.pool1['id']}
        self.listener = self.octavia_admin_listener_client.\
            update_octavia_listener(listener['id'],
                                    listener_data=listener_data)['listener']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        if CONF.network.backend == "nsxv":
            self._verify_lbaas_on_edge(lb_id, listener=self.listener,
                                       pool=self.pool1,
                                       cleanup=[])
        # Point the listener back at the original pool so the temporary
        # pool can be deleted below.
        listener_data = {'name': 'new_listner',
                         'default_pool_id': pool['id']}
        self.listener = self.octavia_admin_listener_client.\
            update_octavia_listener(listener['id'],
                                    listener_data=listener_data)['listener']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        self.octavia_admin_pools_client.\
            delete_octavia_pool(self.pool1['id'])
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
    def deploy_octavia_topology(self, no_of_servers=2, image_id=None):
        """Build the single-network LB test topology.

        Creates an exclusive router with an external gateway, one tenant
        network/subnet (2.0.0.0/24) attached to it, a security group
        opening tcp 80/1212/443, and ``no_of_servers`` backend instances.

        :returns: dict with the created router, subnet and network.
        """
        router_name = data_utils.rand_name('router_lbaas')
        kwargs = {'name': router_name, 'router_type': 'exclusive',
                  'external_gateway_info':
                  {"network_id": CONF.network.public_network_id}}
        router_lbaas = self.cmgr_adm.routers_client.create_router(**kwargs)
        # NOTE(review): the router is created with the admin client but
        # deleted with self.routers_client -- confirm those credentials
        # are allowed to delete it.
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.routers_client.delete_router,
                        router_lbaas['router']['id'])
        networks_client = self.cmgr_adm.networks_client
        name = "network_lbaas_1"
        network_lbaas_1 = self.\
            create_topology_network(name,
                                    networks_client=networks_client)
        sec_rule_client = self.cmgr_adm.security_group_rules_client
        sec_client = self.cmgr_adm.security_groups_client
        kwargs = dict(tenant_id=network_lbaas_1['tenant_id'],
                      security_group_rules_client=sec_rule_client,
                      security_groups_client=sec_client)
        self.sg = self.create_topology_security_group(**kwargs)
        # Open http (80), the netcat test port (1212) and https (443).
        lbaas_rules = [dict(direction='ingress', protocol='tcp',
                            port_range_min=constants.HTTP_PORT,
                            port_range_max=constants.HTTP_PORT, ),
                       dict(direction='ingress', protocol='tcp',
                            port_range_min=1212,
                            port_range_max=1212, ),
                       dict(direction='ingress', protocol='tcp',
                            port_range_min=443, port_range_max=443, )]
        t_id = network_lbaas_1['tenant_id']
        for rule in lbaas_rules:
            self.add_security_group_rule(self.sg, rule,
                                         secclient=sec_client,
                                         ruleclient=sec_rule_client,
                                         tenant_id=t_id)
        body = {"network_id": network_lbaas_1['id'],
                "allocation_pools": [{"start": "2.0.0.2",
                                      "end": "2.0.0.254"}],
                "ip_version": 4, "cidr": "2.0.0.0/24"}
        subnet_client = self.cmgr_adm.subnets_client
        subnet_lbaas = subnet_client.create_subnet(**body)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        subnet_client.delete_subnet,
                        subnet_lbaas['subnet']['id'])
        self.cmgr_adm.routers_client.\
            add_router_interface(router_lbaas['router']['id'],
                                 subnet_id=subnet_lbaas['subnet']['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.routers_client.remove_router_interface,
                        router_lbaas['router']['id'],
                        subnet_id=subnet_lbaas['subnet']['id'])
        # Backend web servers the LB pools will point at.
        for instance in range(0, no_of_servers):
            self.create_topology_instance(
                "server_lbaas_%s" % instance, [network_lbaas_1],
                security_groups=[{'name': self.sg['name']}],
                image_id=image_id, clients=self.cmgr_adm)
        return dict(router=router_lbaas, subnet=subnet_lbaas,
                    network=network_lbaas_1)
    def deploy_octavia_topology_with_multi_network(self, no_of_servers=2,
                                                   image_id=None):
        """Build a two-network LB test topology.

        Creates an exclusive router with an external gateway, two tenant
        networks/subnets (2.0.0.0/24 and 3.0.0.0/24) attached to it, a
        security group opening tcp 80/443, a keypair, and
        ``no_of_servers`` instances on each network.

        :returns: dict with the router and both subnets/networks.
        """
        router_name = data_utils.rand_name('router_lbaas')
        kwargs = {'name': router_name,
                  'router_type': 'exclusive',
                  'external_gateway_info':
                  {"network_id": CONF.network.public_network_id}}
        router_lbaas = self.cmgr_adm.routers_client.create_router(**kwargs)
        # NOTE(review): router created via the admin client but deleted via
        # self.routers_client -- confirm the delete credentials suffice.
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.routers_client.delete_router,
                        router_lbaas['router']['id'])
        networks_client = self.cmgr_adm.networks_client
        name = "network_lbaas_1"
        network_lbaas_1 = self.\
            create_topology_network(name,
                                    networks_client=networks_client)
        name = "network_lbaas_2"
        network_lbaas_2 = self.\
            create_topology_network(name,
                                    networks_client=networks_client)
        sec_rule_client = self.cmgr_adm.security_group_rules_client
        sec_client = self.cmgr_adm.security_groups_client
        kwargs = dict(tenant_id=network_lbaas_1['tenant_id'],
                      security_group_rules_client=sec_rule_client,
                      security_groups_client=sec_client)
        self.sg = self.create_topology_security_group(**kwargs)
        # Open http (80) and https (443) towards the backends.
        lbaas_rules = [dict(direction='ingress', protocol='tcp',
                            port_range_min=constants.HTTP_PORT,
                            port_range_max=constants.HTTP_PORT, ),
                       dict(direction='ingress', protocol='tcp',
                            port_range_min=443, port_range_max=443, )]
        t_id = network_lbaas_1['tenant_id']
        for rule in lbaas_rules:
            self.add_security_group_rule(self.sg, rule,
                                         secclient=sec_client,
                                         ruleclient=sec_rule_client,
                                         tenant_id=t_id)
        body = {"network_id": network_lbaas_1['id'],
                "allocation_pools": [{"start": "2.0.0.2",
                                      "end": "2.0.0.254"}],
                "ip_version": 4, "cidr": "2.0.0.0/24"}
        subnet_client = self.cmgr_adm.subnets_client
        subnet_lbaas1 = subnet_client.create_subnet(**body)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        subnet_client.delete_subnet,
                        subnet_lbaas1['subnet']['id'])
        body = {"network_id": network_lbaas_2['id'],
                "allocation_pools": [{"start": "3.0.0.2",
                                      "end": "3.0.0.254"}],
                "ip_version": 4, "cidr": "3.0.0.0/24"}
        subnet_client = self.cmgr_adm.subnets_client
        subnet_lbaas2 = subnet_client.create_subnet(**body)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        subnet_client.delete_subnet,
                        subnet_lbaas2['subnet']['id'])
        self.cmgr_adm.routers_client.\
            add_router_interface(router_lbaas['router']['id'],
                                 subnet_id=subnet_lbaas1['subnet']['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.routers_client.remove_router_interface,
                        router_lbaas['router']['id'],
                        subnet_id=subnet_lbaas1['subnet']['id'])
        self.cmgr_adm.routers_client.\
            add_router_interface(router_lbaas['router']['id'],
                                 subnet_id=subnet_lbaas2['subnet']['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.routers_client.remove_router_interface,
                        router_lbaas['router']['id'],
                        subnet_id=subnet_lbaas2['subnet']['id'])
        self.keypair = self.create_keypair(self.cmgr_adm.keypairs_client)
        for instance in range(0, no_of_servers):
            self.create_topology_instance(
                "server_lbaas1_%s" % instance, [network_lbaas_1],
                security_groups=[{'name': self.sg['name']}],
                image_id=image_id, clients=self.cmgr_adm,
                keypair=self.keypair)
        # NOTE(review): both names below bind the same
        # self.topology_servers object -- confirm the helper returns a
        # copy, otherwise topology_servers1 and topology_servers2 both
        # end up referring to the combined server set.
        self.topology_servers1 = self.topology_servers
        for instance in range(0, no_of_servers):
            self.create_topology_instance(
                "server_lbaas2_%s" % instance, [network_lbaas_2],
                security_groups=[{'name': self.sg['name']}],
                image_id=image_id, clients=self.cmgr_adm,
                keypair=self.keypair)
        self.topology_servers2 = self.topology_servers
        return dict(router=router_lbaas, subnet1=subnet_lbaas1,
                    subnet2=subnet_lbaas2, network1=network_lbaas_1,
                    network2=network_lbaas_2)
    def _create_multi_listener_topology(self, lb_id=None, lb_name=None):
        """Attach three listener/pool/health-monitor chains to an LB.

        Creates HTTP:80, TCP:1212 and HTTPS:443 listeners on ``lb_id``,
        one pool per listener (ROUND_ROBIN / LEAST_CONNECTIONS /
        SOURCE_IP) and one health monitor per pool (PING / TCP / HTTP).
        Objects are stored on self as listener_N / pool_N /
        healthmonitor_N; the LB is waited back to ACTIVE after every
        create call.
        """
        self.listener_1 = self.octavia_admin_listener_client.\
            create_octavia_listener(loadbalancer_id=lb_id,
                                    protocol='HTTP',
                                    protocol_port='80',
                                    allowed_cidrs=None,
                                    name=lb_name + 'listener_1')['listener']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        self.listener_2 = self.octavia_admin_listener_client.\
            create_octavia_listener(loadbalancer_id=lb_id,
                                    protocol='TCP',
                                    protocol_port='1212',
                                    allowed_cidrs=None,
                                    name=lb_name + 'listener_2')['listener']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        self.listener_3 = self.octavia_admin_listener_client.\
            create_octavia_listener(loadbalancer_id=lb_id,
                                    protocol='HTTPS',
                                    protocol_port='443',
                                    allowed_cidrs=None,
                                    name=lb_name + 'listener_3')['listener']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        self.pool_1 = self.octavia_admin_pools_client.\
            create_octavia_pool(listener_id=self.listener_1['id'],
                                lb_algorithm='ROUND_ROBIN',
                                protocol='HTTP',
                                name=lb_name + 'pool_1',
                                session_persistence=None)['pool']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        self.pool_2 = self.octavia_admin_pools_client.\
            create_octavia_pool(listener_id=self.listener_2['id'],
                                lb_algorithm='LEAST_CONNECTIONS',
                                protocol='TCP',
                                name=lb_name + 'pool_2',
                                session_persistence=None)['pool']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        self.pool_3 = self.octavia_admin_pools_client.\
            create_octavia_pool(listener_id=self.listener_3['id'],
                                lb_algorithm='SOURCE_IP',
                                protocol='TCP',
                                name=lb_name + 'pool_3',
                                session_persistence=None)['pool']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        pool_1_id = self.pool_1['id']
        pool_2_id = self.pool_2['id']
        pool_3_id = self.pool_3['id']
        self.healthmonitor_1 = self.octavia_hm_client.\
            create_octavia_hm(pool_id=pool_1_id, type='PING', delay=2,
                              timeout=10, max_retries=5,
                              name=lb_name)['healthmonitor']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        self.healthmonitor_2 = self.octavia_hm_client.\
            create_octavia_hm(pool_id=pool_2_id, type='TCP', delay=2,
                              timeout=10, max_retries=5,
                              name=lb_name)['healthmonitor']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        self.healthmonitor_3 = self.octavia_hm_client.\
            create_octavia_hm(pool_id=pool_3_id, type='HTTP', delay=2,
                              timeout=10, max_retries=5,
                              name=lb_name)['healthmonitor']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a30')
def test_create_verify_octavia_lb_with_vip_subnet_id_rr(self):
"""
This testcase creates an octavia Loadbalancer with vip-subnet-ip
option, and verifies the traffic on the loadbalancer vip
ROUND_ROBIN algorithm
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
subnet_id = diction['subnet']['subnet']['id']
self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
lb_algorithm="ROUND_ROBIN",
vip_subnet_id=subnet_id)
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a31')
def test_create_verify_octavia_lb_with_vip_subnet_id_lc(self):
"""
This testcase creates an octavia Loadbalancer with vip-subnet-ip
option, and verifies the traffic on the loadbalancer vip,
LEAST_CONNECTIONS algorithm
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
subnet_id = diction['subnet']['subnet']['id']
self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
lb_algorithm="LEAST_CONNECTIONS",
vip_subnet_id=subnet_id)
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a32')
def test_create_verify_octavia_lb_with_vip_subnet_id_si(self):
"""
This testcase creates an octavia Loadbalancer with vip-subnet-ip
option, and verifies the traffic on the loadbalancer vip,
SOURCE_IP algorithm
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
subnet_id = diction['subnet']['subnet']['id']
self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
lb_algorithm="SOURCE_IP",
vip_subnet_id=subnet_id)
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a33')
def test_create_verify_octavia_lb_with_vip_subnet_id_rr_tcp(self):
"""
This testcase creates an octavia Loadbalancer with vip-subnet-ip
option, and verifies the traffic on the loadbalancer vip
ROUND_ROBIN algorithm
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
subnet_id = diction['subnet']['subnet']['id']
self.create_project_octavia(protocol_type="TCP", protocol_port="80",
lb_algorithm="ROUND_ROBIN",
vip_subnet_id=subnet_id)
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a34')
def test_create_verify_octavia_lb_with_vip_subnet_id_lc_tcp(self):
"""
This testcase creates an octavia Loadbalancer with vip-subnet-ip
option, and verifies the traffic on the loadbalancer vip,
LEAST_CONNECTIONS algorithm
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
subnet_id = diction['subnet']['subnet']['id']
self.create_project_octavia(protocol_type="TCP", protocol_port="80",
lb_algorithm="LEAST_CONNECTIONS",
vip_subnet_id=subnet_id)
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a35')
def test_create_verify_octavia_lb_with_vip_subnet_id_si_tcp(self):
"""
This testcase creates an octavia Loadbalancer with vip-subnet-ip
option, and verifies the traffic on the loadbalancer vip,
SOURCE_IP algorithm
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
subnet_id = diction['subnet']['subnet']['id']
self.create_project_octavia(protocol_type="TCP", protocol_port="80",
lb_algorithm="SOURCE_IP",
vip_subnet_id=subnet_id)
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a36')
def test_create_verify_octavia_lb_with_vip_net_id_rr(self):
"""
This testcase creates an octavia Loadbalancer with vip-net-ip
option, and verifies the traffic on the loadbalancer vip
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
net_id = diction['network']['id']
self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
lb_algorithm="ROUND_ROBIN",
vip_net_id=net_id)
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a37')
def test_create_verify_octavia_lb_with_vip_net_id_lc(self):
"""
This testcase creates an octavia Loadbalancer with vip-net-ip
option, and verifies the traffic on the loadbalancer vip
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
net_id = diction['network']['id']
self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
lb_algorithm="LEAST_CONNECTIONS",
vip_net_id=net_id)
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a38')
def test_create_verify_octavia_lb_with_vip_net_id_si(self):
"""
This testcase creates an octavia Loadbalancer with vip-net-ip
option, and verifies the traffic on the loadbalancer vip
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
net_id = diction['network']['id']
self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
lb_algorithm="SOURCE_IP",
vip_net_id=net_id)
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a39')
def test_create_verify_octavia_lb_with_vip_net_id_rr_tcp(self):
"""
This testcase creates an octavia Loadbalancer with vip-net-ip
option, and verifies the traffic on the loadbalancer vip
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
net_id = diction['network']['id']
self.create_project_octavia(protocol_type="TCP", protocol_port="80",
lb_algorithm="ROUND_ROBIN",
vip_net_id=net_id)
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a40')
def test_create_verify_octavia_lb_with_vip_net_id_lc_tcp(self):
"""
This testcase creates an octavia Loadbalancer with vip-net-ip
option, and verifies the traffic on the loadbalancer vip
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
net_id = diction['network']['id']
self.create_project_octavia(protocol_type="TCP", protocol_port="80",
lb_algorithm="LEAST_CONNECTIONS",
vip_net_id=net_id)
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a41')
def test_create_verify_octavia_lb_with_vip_net_id_si_tcp(self):
"""
This testcase creates an octavia Loadbalancer with vip-net-ip
option, and verifies the traffic on the loadbalancer vip
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
net_id = diction['network']['id']
self.create_project_octavia(protocol_type="TCP", protocol_port="80",
lb_algorithm="SOURCE_IP",
vip_net_id=net_id)
self.check_project_lbaas()
    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('c5ac8546-5677-4b7a-8704-3843a12b1a42')
    def test_verify_octavia_http_lb_port_id_round_robin(self):
        """
        This testcase is for verifying the loadbalancer with port-id and
        the pool is created using lb option and attached to a listener.
        ROUND_ROBIN algorithm, http protocol, PING health monitor.
        """
        diction = self.deploy_octavia_topology()
        self.start_web_servers(constants.HTTP_PORT)
        net_id = diction['network']['id']
        # Pre-create the vip port so the LB is created with vip-port-id.
        port_id = self.cmgr_adm.ports_client.create_port(
            network_id=net_id)['port']['id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.ports_client.delete_port, port_id)
        self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
                                    lb_algorithm="ROUND_ROBIN",
                                    vip_port_id=port_id, hm_type='PING',
                                    timeout=self.hm_timeout,
                                    max_retries=self.hm_max_retries,
                                    delay=self.hm_delay)
        self.check_project_lbaas()
    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('c5ac8546-5677-4b7a-8704-3843a12b1a43')
    def test_verify_octavia_http_lb_port_id_lc(self):
        """
        This testcase is for verifying the loadbalancer with port-id and
        the pool is created using lb option and attached to a listener.
        LEAST_CONNECTIONS algorithm, http protocol, PING health monitor.
        """
        diction = self.deploy_octavia_topology()
        self.start_web_servers(constants.HTTP_PORT)
        net_id = diction['network']['id']
        # Pre-create the vip port so the LB is created with vip-port-id.
        port_id = self.cmgr_adm.ports_client.create_port(
            network_id=net_id)['port']['id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.ports_client.delete_port, port_id)
        self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
                                    lb_algorithm="LEAST_CONNECTIONS",
                                    vip_port_id=port_id, hm_type='PING',
                                    timeout=self.hm_timeout,
                                    max_retries=self.hm_max_retries,
                                    delay=self.hm_delay)
        self.check_project_lbaas()
    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('c5ac8546-5677-4b7a-8704-3843a12b1a44')
    def test_verify_octavia_http_lb_port_id_si(self):
        """
        This testcase is for verifying the loadbalancer with port-id and
        the pool is created using lb option and attached to a listener.
        SOURCE_IP algorithm, http protocol, PING health monitor.
        """
        diction = self.deploy_octavia_topology()
        self.start_web_servers(constants.HTTP_PORT)
        net_id = diction['network']['id']
        # Pre-create the vip port so the LB is created with vip-port-id.
        port_id = self.cmgr_adm.ports_client.create_port(
            network_id=net_id)['port']['id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.ports_client.delete_port, port_id)
        self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
                                    lb_algorithm="SOURCE_IP",
                                    vip_port_id=port_id, hm_type='PING',
                                    timeout=self.hm_timeout,
                                    max_retries=self.hm_max_retries,
                                    delay=self.hm_delay)
        self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-5677-4b7a-8704-3843a12b1a47')
def test_verify_octavia_http_lb_port_id_rr(self):
"""
This testcase is for verifying the loadbalancer with port-id and
the pool is created using lb option and attached to a listener
source_ip algorithm, http protocol.
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
net_id = diction['network']['id']
port_id = self.cmgr_adm.ports_client.create_port(
network_id=net_id)['port']['id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.cmgr_adm.ports_client.delete_port, port_id)
self.create_project_octavia(protocol_type="TCP", protocol_port="80",
lb_algorithm="ROUND_ROBIN",
vip_port_id=port_id, hm_type='PING',
timeout=self.hm_timeout,
max_retries=self.hm_max_retries,
delay=self.hm_delay)
    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a48')
    def test_verify_octavia_https_lb_port_id_rr_default_pool(self):
        """
        This testcase is for verifying the loadbalancer with port-id and
        the pool is created using lb option and attached to a listener
        with default-pool option. ROUND_ROBIN algorithm; despite the
        'https' in the test name, the listener is HTTP on port 80.
        """
        diction = self.deploy_octavia_topology()
        self.start_web_servers(constants.HTTP_PORT)
        net_id = diction['network']['id']
        # Pre-create the vip port so the LB is created with vip-port-id.
        port_id = self.cmgr_adm.ports_client.create_port(
            network_id=net_id)['port']['id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.ports_client.delete_port, port_id)
        self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
                                    lb_algorithm="ROUND_ROBIN",
                                    vip_port_id=port_id, hm_type='PING',
                                    timeout=self.hm_timeout,
                                    max_retries=self.hm_max_retries,
                                    delay=self.hm_delay, default_pool=True)
        self.check_project_lbaas()
    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a49')
    def test_verify_octavia_https_lb_port_id_lc_default_pool(self):
        """
        This testcase is for verifying the loadbalancer with port-id and
        the pool is created using lb option and attached to a listener
        with default-pool option. LEAST_CONNECTIONS algorithm; despite
        the 'https' in the test name, the listener is HTTP on port 80.
        """
        diction = self.deploy_octavia_topology()
        self.start_web_servers(constants.HTTP_PORT)
        net_id = diction['network']['id']
        # Pre-create the vip port so the LB is created with vip-port-id.
        port_id = self.cmgr_adm.ports_client.create_port(
            network_id=net_id)['port']['id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.ports_client.delete_port, port_id)
        self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
                                    lb_algorithm="LEAST_CONNECTIONS",
                                    vip_port_id=port_id, hm_type='PING',
                                    timeout=self.hm_timeout,
                                    max_retries=self.hm_max_retries,
                                    delay=self.hm_delay, default_pool=True)
        self.check_project_lbaas()
    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a50')
    def test_verify_octavia_https_lb_port_id_si_default_pool(self):
        """
        This testcase is for verifying the loadbalancer with port-id and
        the pool is created using lb option and attached to a listener
        with default-pool option. SOURCE_IP algorithm; despite the
        'https' in the test name, the listener is HTTP on port 80.
        """
        diction = self.deploy_octavia_topology()
        self.start_web_servers(constants.HTTP_PORT)
        net_id = diction['network']['id']
        # Pre-create the vip port so the LB is created with vip-port-id.
        port_id = self.cmgr_adm.ports_client.create_port(
            network_id=net_id)['port']['id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.ports_client.delete_port, port_id)
        self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
                                    lb_algorithm="SOURCE_IP",
                                    vip_port_id=port_id, hm_type='PING',
                                    timeout=self.hm_timeout,
                                    max_retries=self.hm_max_retries,
                                    delay=self.hm_delay, default_pool=True)
        self.check_project_lbaas()
    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a51')
    def test_verify_octavia_https_lb_port_id_rr_default_pool_tcp(self):
        """
        This testcase is for verifying the loadbalancer with port-id and
        the pool is created using lb option and attached to a listener
        with default-pool option, tcp traffic. ROUND_ROBIN algorithm.
        """
        diction = self.deploy_octavia_topology()
        self.start_web_servers(constants.HTTP_PORT)
        net_id = diction['network']['id']
        # Pre-create the vip port so the LB is created with vip-port-id.
        port_id = self.cmgr_adm.ports_client.create_port(
            network_id=net_id)['port']['id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.ports_client.delete_port, port_id)
        self.create_project_octavia(protocol_type="TCP", protocol_port="80",
                                    lb_algorithm="ROUND_ROBIN",
                                    vip_port_id=port_id, hm_type='PING',
                                    timeout=self.hm_timeout,
                                    max_retries=self.hm_max_retries,
                                    delay=self.hm_delay, default_pool=True)
        self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a52')
def test_verify_octavia_https_lb_port_id_lc_default_pool_tcp(self):
"""
This testcase is for verifying the loadbalancer with port-id and
the pool is created using lb option and attached to a listener
with default-pool option, tcp traffic
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
net_id = diction['network']['id']
port_id = self.cmgr_adm.ports_client.create_port(
network_id=net_id)['port']['id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.cmgr_adm.ports_client.delete_port, port_id)
self.create_project_octavia(protocol_type="TCP", protocol_port="80",
lb_algorithm="LEAST_CONNECTIONS",
vip_port_id=port_id, hm_type='PING',
timeout=self.hm_timeout,
max_retries=self.hm_max_retries,
delay=self.hm_delay, default_pool=True)
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a55')
def test_verify_octavia_https_lb_port_id_lc_default_pool_qos(self):
"""
This testcase is for verifying the loadbalancer with port-id and
the pool is created using lb option and attached to a listener
with default-pool option,least connections https traffic
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
net_id = diction['network']['id']
policy = self.cmgr_adm.qos_client.create_qos_policy(
name='test-policy', description='test policy desc1',
shared=False)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.cmgr_adm.qos_client.delete_qos_policy,
policy['policy']['id'])
port_id = self.cmgr_adm.ports_client.create_port(
network_id=net_id)['port']['id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.cmgr_adm.ports_client.delete_port, port_id)
self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
lb_algorithm="LEAST_CONNECTIONS",
vip_port_id=port_id, hm_type='PING',
timeout=self.hm_timeout,
max_retries=self.hm_max_retries,
delay=self.hm_delay, default_pool=True,
qos_policy_id=policy['policy']['id'])
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a56')
def test_verify_octavia_https_lb_port_id_rr_default_pool_qos(self):
"""
This testcase is for verifying the loadbalancer with port-id and
the pool is created using lb option and attached to a listener
with default-pool option,least connections https traffic
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
net_id = diction['network']['id']
policy = self.cmgr_adm.qos_client.create_qos_policy(
name='test-policy', description='test policy desc1',
shared=False)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.cmgr_adm.qos_client.delete_qos_policy,
policy['policy']['id'])
port_id = self.cmgr_adm.ports_client.create_port(
network_id=net_id)['port']['id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.cmgr_adm.ports_client.delete_port, port_id)
self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
lb_algorithm="ROUND_ROBIN",
vip_port_id=port_id, hm_type='PING',
timeout=self.hm_timeout,
max_retries=self.hm_max_retries,
delay=self.hm_delay, default_pool=True,
qos_policy_id=policy['policy']['id'])
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a57')
def test_verify_octavia_https_lb_port_id_lc_default_pool_qos_tcp(self):
"""
This testcase is for verifying the loadbalancer with port-id and
the pool is created using lb option and attached to a listener
with default-pool option,least connections https traffic
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
net_id = diction['network']['id']
policy = self.cmgr_adm.qos_client.create_qos_policy(
name='test-policy', description='test policy desc1',
shared=False)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.cmgr_adm.qos_client.delete_qos_policy,
policy['policy']['id'])
port_id = self.cmgr_adm.ports_client.create_port(
network_id=net_id)['port']['id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.cmgr_adm.ports_client.delete_port, port_id)
self.create_project_octavia(protocol_type="TCP", protocol_port="80",
lb_algorithm="LEAST_CONNECTIONS",
vip_port_id=port_id, hm_type='PING',
timeout=self.hm_timeout,
max_retries=self.hm_max_retries,
delay=self.hm_delay, default_pool=True,
qos_policy_id=policy['policy']['id'])
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3843a11b1a58')
def test_verify_octavia_lb_port_id_rr_default_pool_https(self):
"""
This testcase is for verifying the loadbalancer with port-id and
the pool is created using lb option and attached to a listener
with default-pool option
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTPS_PORT)
net_id = diction['network']['id']
port_id = self.cmgr_adm.ports_client.create_port(
network_id=net_id)['port']['id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.cmgr_adm.ports_client.delete_port, port_id)
self.create_project_octavia(protocol_type="HTTPS", protocol_port="443",
lb_algorithm="ROUND_ROBIN",
vip_port_id=port_id, hm_type='PING',
timeout=self.hm_timeout,
max_retries=self.hm_max_retries,
delay=self.hm_delay, default_pool=True)
self.check_project_lbaas()
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7b-8704-3843a11c1a58')
def test_octavia_check_certificate_at_backend(self):
"""
Create octavia loadbalancer with http traffic with barbican enabled.
"""
diction = self.deploy_octavia_topology()
subnet_id = diction['subnet']['subnet']['id']
if not CONF.nsxv3.ens:
self.start_web_servers(constants.HTTP_PORT)
barbican_secrets = self.create_barbican_secret_conatainer(
constants.CERT_FILE, constants.KEY_FILE)
barbican_container = barbican_secrets['secret_container']
self.create_project_octavia(protocol_type="TERMINATED_HTTPS",
protocol_port="443",
lb_algorithm="ROUND_ROBIN",
vip_subnet_id=subnet_id,
hm_type='HTTP',
member_count=2,
weight=5,
pool_protocol='HTTP',
pool_port='80',
barbican_container=barbican_container,
count=0, barbican=True,
delay=self.hm_delay,
max_retries=self.hm_max_retries,
timeout=self.hm_timeout)
self.check_lbaas_project_weight_values(HTTPS=True)
cert_file = open(constants.CERT_FILE, "r")
cert_content = cert_file.read()
if CONF.network.backend == "nsxv":
self.vsm.check_cert_at_backend(lb_id=self.loadbalancer['id'],
cert_conent=cert_content.rstrip())
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a61')
def test_create_verify_octavia_lb_with_vip_subnet_id(self):
"""
This testcase creates an octavia Loadbalancer with vip-subnet-ip
option, and verifies the traffic on the loadbalancer vip
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
subnet_id = diction['subnet']['subnet']['id']
self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
lb_algorithm="LEAST_CONNECTIONS",
vip_subnet_id=subnet_id)
self.check_project_lbaas()
get_lb = self.octavia_admin_client.list_octavia_load_balancers()
lb_id = get_lb['loadbalancers'][0]['id']
stat = self.octavia_admin_client.\
show_octavia_load_balancer_stats(lb_id)
assert (stat['stats']['bytes_in'] == 0 and
stat['stats']['bytes_out'] == 0)
self.check_lbaas_project_weight_values(constants.NO_OF_VMS_2)
stat = self.octavia_admin_client.\
show_octavia_load_balancer_stats(lb_id)
assert (stat['stats']['bytes_in'] >= 0 and
stat['stats']['bytes_out'] >= 0)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('60e9ecaf-b8d6-48a9-b0d2-942e5bb38f62')
def test_octavia_http_round_robin_with_session_persistence(self):
"""
To verify the server count for LB pool with SOURCE_IP
session persistence and ROUND_ROBIN lb-algorithm
expected outcome is only one server responds to the
client requests
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
subnet_id = diction['subnet']['subnet']['id']
self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
lb_algorithm="ROUND_ROBIN",
vip_subnet_id=subnet_id, persistence=True,
persistence_type="SOURCE_IP")
self.check_lbaas_project_weight_values(constants.NO_OF_VMS_2,
hash_persistence=True)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('60e9ecaf-b8d6-48a9-b0d2-942e5bb38f63')
def test_octavia_http_round_robin_with_net_id_session_persistence(self):
"""
To verify the server count for LB pool with SOURCE_IP
session persistence and ROUND_ROBIN lb-algorithm,
expected outcome is only one server responds to the
client requests.
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
net_id = diction['network']['id']
self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
lb_algorithm="ROUND_ROBIN",
vip_net_id=net_id, persistence=True,
persistence_type="SOURCE_IP")
self.check_lbaas_project_weight_values(constants.NO_OF_VMS_2,
hash_persistence=True)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('ca5c5468-6768-4b7a-8704-3844b11b1a64')
def test_create_REJECT_l7policies_listeneres(self):
"""
The Loadbalancer listener is created with allowed_cidrs specified
"""
diction = self.deploy_octavia_topology()
if not CONF.nsxv3.ens:
self.start_web_servers(constants.HTTP_PORT)
subnet_id = diction['subnet']['subnet']['id']
self.create_project_octavia(protocol_type="HTTP",
protocol_port="80",
lb_algorithm="ROUND_ROBIN",
vip_subnet_id=subnet_id,
l7policy=True, action='REJECT')
@decorators.attr(type='nsxv')
@decorators.idempotent_id('ca5c4368-6768-4b7a-8704-3844b11b1a65')
def test_create_REDIRECT_TO_URL_l7policies_listeneres(self):
"""
The Loadbalancer listener is created with redirect_url l7policy
with no url specified.
"""
diction = self.deploy_octavia_topology()
if not CONF.nsxv3.ens:
self.start_web_servers(constants.HTTP_PORT)
subnet_id = diction['subnet']['subnet']['id']
self.create_project_octavia(protocol_type="HTTP",
protocol_port="80",
lb_algorithm="ROUND_ROBIN",
vip_subnet_id=subnet_id,
l7policy=True,
action='REDIRECT_TO_URL',
redirect_url='http://www.vmware.com')
@decorators.attr(type='nsxv')
@decorators.idempotent_id('ca5c4368-6768-4a7b-8704-3844b11b1b66')
def test_create_REDIRECT_TO_POOL_l7policies_listeneres(self):
"""
The Loadbalancer listener is created with redirect_pool l7policy
with url specified.
"""
diction = self.deploy_octavia_topology()
if not CONF.nsxv3.ens:
self.start_web_servers(constants.HTTP_PORT)
subnet_id = diction['subnet']['subnet']['id']
lb = self.create_project_octavia(protocol_type="HTTP",
protocol_port="80",
lb_algorithm="ROUND_ROBIN",
vip_subnet_id=subnet_id)
listener = lb['listener_id']
self.octavia_admin_l7policies_client.create_octavia_l7policies(
listener_id=listener, action='REDIRECT_TO_POOL',
redirect_pool_id=lb['pool_id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb['lb_id'])
l7p = self.octavia_admin_l7policies_client.list_octavia_l7policies(
lb['listener_id'])
for i in l7p['l7policies']:
if lb['listener_id'] == i['listener_id']:
l7p_id = i['id']
self.octavia_admin_l7policies_client.delete_octavia_l7policy(
l7p_id)
    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('ca5c4368-6769-4a7b-8704-3844b11b1b66')
    def test_delete_lb_with_cascade_without_member(self):
        """Cascade-delete an LB stack that has no pool members.

        Builds LB -> listener -> pool -> health monitor (no members),
        deletes the LB with the cascade option and verifies that every
        child object is removed from the NSX-v edge and that the LB is
        gone from the API listing.
        """
        diction = self.deploy_octavia_topology()
        subnet_id = diction['subnet']['subnet']['id']
        lb_name = data_utils.rand_name(self.namestart)
        self.loadbalancer = self.octavia_admin_client.\
            create_octavia_load_balancer(name=lb_name,
                                         vip_subnet_id=subnet_id,
                                         admin_state_up=True)['loadbalancer']
        lb_id = self.loadbalancer['id']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        self.listener = self.octavia_admin_listener_client.\
            create_octavia_listener(loadbalancer_id=lb_id,
                                    protocol='TCP',
                                    protocol_port='1212',
                                    allowed_cidrs=None,
                                    name=lb_name)['listener']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        self.pool = self.octavia_admin_pools_client.\
            create_octavia_pool(listener_id=self.listener['id'],
                                lb_algorithm='ROUND_ROBIN',
                                protocol='TCP',
                                name=lb_name,
                                session_persistence=None)['pool']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        pool_id = self.pool['id']
        self.healthmonitor = self.octavia_hm_client.\
            create_octavia_hm(pool_id=pool_id, type='PING', delay=2,
                              timeout=10, max_retries=5,
                              name=lb_name)['healthmonitor']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        # Cascade delete removes the LB and all of its children in one call.
        self.octavia_admin_client.\
            delete_octavia_load_balancer_with_cascade(lb_id)
        self.octavia_admin_client.\
            wait_for_load_balancer_status(lb_id, is_delete_op=True)
        if CONF.network.backend == "nsxv":
            # cleanup=[...] here means: assert these objects are GONE
            # from the edge configuration.
            self._verify_lbaas_on_edge(lb_id, listener=self.listener,
                                       pool=self.pool,
                                       hmonitor=self.healthmonitor,
                                       cleanup=["listener", "pool", "hm"])
        lbs = self.octavia_admin_client.\
            list_octavia_load_balancers()['loadbalancers']
        lb_names = [lb['name'] for lb in lbs]
        self.assertNotIn(lb_name, lb_names)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
def test_delete_lb_with_cascade_with_member(self):
diction = self.deploy_octavia_topology()
subnet_id = diction['subnet']['subnet']['id']
self.start_web_servers('1212')
self.create_project_octavia(protocol_type="TCP", protocol_port="1212",
lb_algorithm="LEAST_CONNECTIONS",
hm_type='PING', vip_subnet_id=subnet_id,
default_pool=True,
timeout=self.hm_timeout, clean_up=False,
max_retries=self.hm_max_retries,
delay=self.hm_delay)
lb_id = self.loadbalancer['id']
self.octavia_admin_client.\
delete_octavia_load_balancer_with_cascade(lb_id)
self.octavia_admin_client.\
wait_for_load_balancer_status(lb_id, is_delete_op=True)
lbs = self.octavia_admin_client.\
list_octavia_load_balancers()['loadbalancers']
lb_names = [lb['name'] for lb in lbs]
self.assertNotIn(self.loadbalancer['name'], lb_names)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('ca5d4368-6770-4a7b-8704-3845b11b1b66')
def test_delete_lb_with_cascade_when_pool_without_attaching_listener(self):
diction = self.deploy_octavia_topology()
subnet_id = diction['subnet']['subnet']['id']
self.create_project_octavia(protocol_type="TCP", protocol_port="1212",
lb_algorithm="LEAST_CONNECTIONS",
hm_type='PING', vip_subnet_id=subnet_id,
default_pool=True,
timeout=self.hm_timeout, clean_up=False,
max_retries=self.hm_max_retries,
delay=self.hm_delay)
lb_id = self.loadbalancer['id']
self.octavia_admin_pools_client.\
delete_octavia_pool(self.pool['pool']['id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.pool = self.octavia_admin_pools_client.\
create_octavia_pool(loadbalancer_id=lb_id,
lb_algorithm='ROUND_ROBIN',
protocol='TCP',
name='NewPool',
session_persistence=None)['pool']
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.octavia_admin_client.\
delete_octavia_load_balancer_with_cascade(lb_id)
self.octavia_admin_client.\
wait_for_load_balancer_status(lb_id, is_delete_op=True)
lbs = self.octavia_admin_client.\
list_octavia_load_balancers()['loadbalancers']
lb_names = [lb['name'] for lb in lbs]
self.assertNotIn(self.loadbalancer['name'], lb_names)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
def test_lb_crud_with_backend_verification(self):
diction = self.deploy_octavia_topology()
subnet_id = diction['subnet']['subnet']['id']
self.start_web_servers('1212')
self.create_project_octavia(protocol_type="TCP", protocol_port="1212",
lb_algorithm="LEAST_CONNECTIONS",
hm_type='PING', vip_subnet_id=subnet_id,
default_pool=True,
timeout=self.hm_timeout, clean_up=False,
max_retries=self.hm_max_retries,
delay=self.hm_delay)
self.vip_ip_address = self.vip_ip_address + ':1212'
self.check_project_lbaas()
lb_id = self.loadbalancer['id']
self.pool = self.pool['pool']
self.healthmonitor = self.healthmonitor['healthmonitor']
if CONF.network.backend == "nsxv":
self._verify_lbaas_on_edge(lb_id, listener=self.listener,
pool=self.pool,
hmonitor=self.healthmonitor,
member=self.members[0]['member'],
cleanup=[])
# Update all components
self._update_lb_components(lb_id, self.healthmonitor,
self.members[0]['member'], self.pool,
self.listener)
# Delete & verify
self.octavia_hm_client.\
delete_octavia_hm(self.healthmonitor['id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.octavia_admin_pools_client.\
delete_octavia_pool(self.pool['id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.octavia_admin_listener_client.\
delete_octavia_listener(self.listener['id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
# verify health monitor, pool, listener got deleted from edge
if CONF.network.backend == "nsxv":
self._verify_lbaas_on_edge(lb_id, listener=self.listener,
pool=self.pool,
hmonitor=self.healthmonitor,
member=self.members[0]['member'],
cleanup=["ALL"])
self.octavia_admin_client.delete_octavia_load_balancer(lb_id)
self.octavia_admin_client.\
wait_for_load_balancer_status(lb_id, is_delete_op=True)
lbs = self.octavia_admin_client.\
list_octavia_load_balancers()['loadbalancers']
lb_names = [lb['name'] for lb in lbs]
self.assertNotIn(self.loadbalancer['name'], lb_names)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
def test_deletion_pool_remove_hm_with_backend_verification(self):
diction = self.deploy_octavia_topology()
subnet_id = diction['subnet']['subnet']['id']
self.create_project_octavia(protocol_type="TCP", protocol_port="1212",
lb_algorithm="ROUND_ROBIN",
hm_type='PING', vip_subnet_id=subnet_id,
default_pool=True,
timeout=self.hm_timeout, clean_up=False,
max_retries=self.hm_max_retries,
delay=self.hm_delay)
self.pool = self.pool['pool']
self.healthmonitor = self.healthmonitor['healthmonitor']
lb_id = self.loadbalancer['id']
if CONF.network.backend == "nsxv":
self._verify_lbaas_on_edge(lb_id, listener=self.listener,
pool=self.pool,
hmonitor=self.healthmonitor,
member=self.members[0]['member'],
cleanup=[])
# Delete pools member then pool & listener,
# should remove healthmonitor
for member in self.members:
self.octavia_admin_members_client.\
delete_octavia_member(self.pool['id'], member['member']['id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.octavia_admin_pools_client.\
delete_octavia_pool(self.pool['id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.octavia_admin_listener_client.\
delete_octavia_listener(self.listener['id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
# verify health monitor, pool, listener got deleted from edge
if CONF.network.backend == "nsxv":
self._verify_lbaas_on_edge(lb_id, listener=self.listener,
pool=self.pool,
hmonitor=self.healthmonitor,
member=self.members[0]['member'],
cleanup=["ALL"])
self.octavia_admin_client.delete_octavia_load_balancer(lb_id)
self.octavia_admin_client.\
wait_for_load_balancer_status(lb_id, is_delete_op=True)
lbs = self.octavia_admin_client.\
list_octavia_load_balancers()['loadbalancers']
lb_names = [lb['name'] for lb in lbs]
self.assertNotIn(self.loadbalancer['name'], lb_names)
    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('ca6c4368-6770-4a7b-8704-3844b11b1b61')
    def test_vip_memebr_in_external_subnet(self):
        """Build an LB whose VIP and members live on the external subnet.

        Members are added using the servers' floating IPs, traffic is
        verified, the edge configuration is checked, and finally the LB
        is cascade-deleted.

        NOTE: the "memebr" typo in the test name is kept; renaming would
        change the test's externally visible identifier.
        """
        diction = self.deploy_octavia_topology()
        self.start_web_servers('1212')
        # VIP is placed on the router's external (gateway) subnet.
        ext_gw = diction['router']['router']['external_gateway_info']
        ext_subnet_id = ext_gw['external_fixed_ips'][0]['subnet_id']
        lb_name = data_utils.rand_name(self.namestart)
        self.loadbalancer = self.octavia_admin_client.\
            create_octavia_load_balancer(name=lb_name,
                                         vip_subnet_id=ext_subnet_id,
                                         admin_state_up=True)['loadbalancer']
        lb_id = self.loadbalancer['id']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        self.listener = self.octavia_admin_listener_client.\
            create_octavia_listener(loadbalancer_id=lb_id,
                                    protocol='TCP',
                                    protocol_port='1212',
                                    allowed_cidrs=None,
                                    name=lb_name)['listener']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        self.pool = self.octavia_admin_pools_client.\
            create_octavia_pool(listener_id=self.listener['id'],
                                lb_algorithm='ROUND_ROBIN',
                                protocol='TCP',
                                name=lb_name,
                                session_persistence=None)['pool']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        pool_id = self.pool['id']
        self.members = []
        # Members are registered by floating IP, i.e. addresses on the
        # same external subnet as the VIP.
        for s in self.topology_servers.keys():
            fip_data = self.servers_details[s].floating_ips[0]
            floating_ip_address = fip_data['floating_ip_address']
            member = self.octavia_admin_members_client.\
                create_octavia_member(pool_id,
                                      subnet_id=ext_subnet_id,
                                      address=floating_ip_address,
                                      protocol_port='1212',
                                      weight=1)['member']
            self.members.append(member)
            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        self.healthmonitor = self.octavia_hm_client.\
            create_octavia_hm(pool_id=pool_id, type='PING', delay=2,
                              timeout=10, max_retries=5,
                              name=lb_name)['healthmonitor']
        self.check_project_lbaas()
        if CONF.network.backend == "nsxv":
            # cleanup=[] asserts the objects are PRESENT on the edge.
            self._verify_lbaas_on_edge(lb_id, listener=self.listener,
                                       pool=self.pool, member=self.members[0],
                                       hmonitor=self.healthmonitor,
                                       cleanup=[])
        self.octavia_admin_client.\
            delete_octavia_load_balancer_with_cascade(lb_id)
        self.octavia_admin_client.\
            wait_for_load_balancer_status(lb_id, is_delete_op=True)
        lbs = self.octavia_admin_client.\
            list_octavia_load_balancers()['loadbalancers']
        lb_names = [lb['name'] for lb in lbs]
        self.assertNotIn(lb_name, lb_names)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
def test_lb_crud_with_multi_listeners(self):
"""
create lb with three listener, each listener with one poool
each pool has two memebrs, each pool has health monitor
verify traffic with each listener
Update each each component and verify lb sttatus
Delete one of listenr & respecitve pool, should not impact
another listner , then lb sith cascade option which should
delete all componets
"""
diction = self.deploy_octavia_topology(no_of_servers=6)
self.start_web_servers(constants.HTTP_PORT)
subnet_id = diction['subnet']['subnet']['id']
lb_name = data_utils.rand_name(self.namestart)
self.loadbalancer = self.octavia_admin_client.\
create_octavia_load_balancer(name=lb_name,
vip_subnet_id=subnet_id,
admin_state_up=True)['loadbalancer']
lb_id = self.loadbalancer['id']
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self._create_multi_listener_topology(lb_id=lb_id, lb_name=lb_name)
# update two per each pool member
self.members = []
for s in self.topology_servers.keys():
fip_data = self.servers_details[s].floating_ips[0]
fixed_ip_address = fip_data['fixed_ip_address']
servers = list(self.topology_servers.keys())
if servers.index(s) <= 1:
member = self.octavia_admin_members_client.\
create_octavia_member(self.pool_1['id'],
subnet_id=subnet_id,
address=fixed_ip_address,
protocol_port='80',
weight=1)['member']
elif servers.index(s) == 2 or servers.index(s) == 3:
member = self.octavia_admin_members_client.\
create_octavia_member(self.pool_2['id'],
subnet_id=subnet_id,
address=fixed_ip_address,
protocol_port='80',
weight=2)['member']
else:
member = self.octavia_admin_members_client.\
create_octavia_member(self.pool_3['id'],
subnet_id=subnet_id,
address=fixed_ip_address,
protocol_port='80',
weight=2)['member']
self.members.append(member)
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
if CONF.network.backend == "nsxv":
self._verify_lbaas_on_edge(lb_id, listener=self.listener_1,
pool=self.pool_1,
member=self.members[0],
hmonitor=self.healthmonitor_1,
cleanup=[])
if CONF.network.backend == "nsxv":
self._verify_lbaas_on_edge(lb_id, listener=self.listener_3,
pool=self.pool_3,
member=self.members[4],
hmonitor=self.healthmonitor_3,
cleanup=[])
# Assign floating ip to vip
self._assign_floating_ip_to_vip()
# verify listener1 traffic
self.check_project_lbaas()
# verify listener2 traffic
self.vip_ip_address = self.vip_ip_address + ':1212'
self.do_http_request(vip=self.vip_ip_address,
send_counts=self.poke_counters)
self.assertTrue(len(self.http_cnt) == 2)
# Update all components
self._update_lb_components(lb_id, self.healthmonitor_1,
self.members[0], self.pool_1,
self.listener_1)
self.listener_1, self.pool_1, self.healthmonitor_1 =\
self.listener, self.pool, self.healthmonitor
# Delete third listener & pool
self.octavia_admin_members_client.\
delete_octavia_member(self.pool_3['id'], self.members[4]['id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.octavia_admin_members_client.\
delete_octavia_member(self.pool_3['id'], self.members[5]['id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.octavia_admin_pools_client.\
delete_octavia_pool(self.pool_3['id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.octavia_admin_listener_client.\
delete_octavia_listener(self.listener_3['id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
# verify health monitor, pool, listener got deleted from edge
if CONF.network.backend == "nsxv":
self._verify_lbaas_on_edge(lb_id, listener=self.listener_3,
pool=self.pool_3,
hmonitor=self.healthmonitor_3,
member=self.members[5],
cleanup=["ALL"])
# verify listener_2 present
if CONF.network.backend == "nsxv":
self._verify_lbaas_on_edge(lb_id, listener=self.listener_1,
pool=self.pool_1,
member=self.members[1],
hmonitor=self.healthmonitor_1,
cleanup=[])
self.octavia_admin_client.\
delete_octavia_load_balancer_with_cascade(lb_id)
self.octavia_admin_client.\
wait_for_load_balancer_status(lb_id,
is_delete_op=True)
lbs = self.octavia_admin_client.\
list_octavia_load_balancers()['loadbalancers']
lb_names = [lb['name'] for lb in lbs]
self.assertNotIn(self.loadbalancer['name'], lb_names)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11c1b66')
def test_delete_second_lb_verify_tcp_connection_with_first_lb(self):
diction = \
self.deploy_octavia_topology_with_multi_network(no_of_servers=1)
subnet_id = diction['subnet1']['subnet']['id']
router = diction['router']['router']
# Create first lb
lb_name = data_utils.rand_name(self.namestart)
self.loadbalancer = self.octavia_admin_client.\
create_octavia_load_balancer(name=lb_name,
vip_subnet_id=subnet_id,
admin_state_up=True)['loadbalancer']
lb_id = self.loadbalancer['id']
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.listener = self.octavia_admin_listener_client.\
create_octavia_listener(loadbalancer_id=lb_id,
protocol='TCP',
protocol_port='1212',
allowed_cidrs=None,
name=lb_name)['listener']
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.pool = self.octavia_admin_pools_client.\
create_octavia_pool(listener_id=self.listener['id'],
lb_algorithm='ROUND_ROBIN',
protocol='TCP',
name=lb_name,
session_persistence=None)['pool']
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
pool_id = self.pool['id']
for s in self.topology_servers.keys():
fip_data = self.servers_details[s].floating_ips[0]
fixed_ip_address = fip_data['fixed_ip_address']
servers = list(self.topology_servers.keys())
# Adding one VM as member
if servers.index(s) == 0:
self.octavia_admin_members_client.\
create_octavia_member(pool_id,
subnet_id=subnet_id,
address=fixed_ip_address,
protocol_port='1212',
weight=1)['member']
server1_fip = fip_data['floating_ip_address']
else:
client1_fip = fip_data['floating_ip_address']
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.healthmonitor = self.octavia_hm_client.\
create_octavia_hm(pool_id=pool_id, type='PING', delay=2,
timeout=10, max_retries=5,
name=lb_name)['healthmonitor']
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
# LB creation is done, start netcat session and verify on edge
self.start_netcat_session(client1_fip, server1_fip,
protocol_port=1212)
vip = self.loadbalancer['vip_address']
self.vsm.enable_ssh_on_edge(router['name'], router['id'])
self.verify_session_edge(vip, router)
lb2_name = data_utils.rand_name(self.namestart)
self.loadbalancer_2 = self.octavia_admin_client.\
create_octavia_load_balancer(name=lb2_name,
vip_subnet_id=subnet_id,
admin_state_up=True)['loadbalancer']
lb2_id = self.loadbalancer_2['id']
self.octavia_admin_client.wait_for_load_balancer_status(lb2_id)
self.octavia_admin_client.\
delete_octavia_load_balancer_with_cascade(lb2_id)
self.verify_session_edge(vip, router)
self.octavia_admin_client.\
delete_octavia_load_balancer_with_cascade(lb_id)
self.octavia_admin_client.\
wait_for_load_balancer_status(lb_id,
is_delete_op=True)
lbs = self.octavia_admin_client.\
list_octavia_load_balancers()['loadbalancers']
lb_names = [lb['name'] for lb in lbs]
self.assertNotIn(self.loadbalancer['name'], lb_names)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
def test_delete_member_when_vip_member_diff_subnet(self):
diction = self.deploy_octavia_topology_with_multi_network()
self.start_web_servers('1212')
subnet_id = diction['subnet1']['subnet']['id']
subnet_id2 = diction['subnet2']['subnet']['id']
lb_name = data_utils.rand_name(self.namestart)
self.loadbalancer = self.octavia_admin_client.\
create_octavia_load_balancer(name=lb_name,
vip_subnet_id=subnet_id,
admin_state_up=True)['loadbalancer']
lb_id = self.loadbalancer['id']
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.listener = self.octavia_admin_listener_client.\
create_octavia_listener(loadbalancer_id=lb_id,
protocol='TCP',
protocol_port='1212',
allowed_cidrs=None,
name=lb_name)['listener']
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.pool = self.octavia_admin_pools_client.\
create_octavia_pool(listener_id=self.listener['id'],
lb_algorithm='ROUND_ROBIN',
protocol='TCP',
name=lb_name,
session_persistence=None)['pool']
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
pool_id = self.pool['id']
self.members = []
for s in self.topology_servers.keys():
fip_data = self.servers_details[s].floating_ips[0]
fixed_ip_address = fip_data['fixed_ip_address']
servers = list(self.topology_servers.keys())
if servers.index(s) <= 1:
member = self.octavia_admin_members_client.\
create_octavia_member(pool_id,
subnet_id=subnet_id,
address=fixed_ip_address,
protocol_port='1212',
weight=1)['member']
else:
member = self.octavia_admin_members_client.\
create_octavia_member(pool_id,
subnet_id=subnet_id2,
address=fixed_ip_address,
protocol_port='1212',
weight=1)['member']
self.members.append(member)
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.healthmonitor = self.octavia_hm_client.\
create_octavia_hm(pool_id=pool_id, type='PING', delay=2,
timeout=10, max_retries=5,
name=lb_name)['healthmonitor']
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
if CONF.network.backend == "nsxv":
self._verify_lbaas_on_edge(lb_id, listener=self.listener,
pool=self.pool,
member=self.members[0],
hmonitor=self.healthmonitor,
cleanup=[])
# Delete one member of same subnet as vip
self.octavia_admin_members_client.\
delete_octavia_member(pool_id, self.members[0]['id'])
if CONF.network.backend == "nsxv":
self._verify_lbaas_on_edge(lb_id, listener=self.listener,
pool=self.pool,
member=self.members[0],
hmonitor=self.healthmonitor,
cleanup=['member'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
# Delete one member of diff subnet as vip
self.octavia_admin_members_client.\
delete_octavia_member(pool_id, self.members[3]['id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
if CONF.network.backend == "nsxv":
self._verify_lbaas_on_edge(lb_id, listener=self.listener,
pool=self.pool,
member=self.members[3],
hmonitor=self.healthmonitor,
cleanup=['member'])
# Delete pool, listenr
self.octavia_admin_pools_client.\
delete_octavia_pool(pool_id)
self.octavia_admin_listener_client.\
delete_octavia_listener(self.listener['id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
# verify health monitor, pool, listener got deleted from edge
if CONF.network.backend == "nsxv":
self._verify_lbaas_on_edge(lb_id, listener=self.listener,
pool=self.pool, member=self.members[0],
hmonitor=self.healthmonitor,
cleanup=['ALL'])
self.octavia_admin_client.delete_octavia_load_balancer(lb_id)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('60e9ecaf-b8d7-48a9-b0d2-942e5bb38f63')
def test_update_to_None_verify_octavia_session_persistence(self):
"""
To verify the server count for LB pool with SOURCE_IP
session persistence and ROUND_ROBIN lb-algorithm,
expected outcome is only one server responds to the
client requests.
Set session Persitence as None, verify backend and now
server count in traffic should be 2
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
net_id = diction['network']['id']
self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
lb_algorithm="ROUND_ROBIN",
vip_net_id=net_id, persistence=True,
persistence_type="SOURCE_IP",
clean_up=False)
self.check_lbaas_project_weight_values(constants.NO_OF_VMS_2,
hash_persistence=True)
# verify health monitor, pool, listener got deleted from edge
self.pool = self.pool['pool']
lb_id, lb_name = self.loadbalancer['id'], self.loadbalancer['name']
if CONF.network.backend == "nsxv":
self._verify_lbaas_on_edge(lb_id, listener=self.listener,
pool=self.pool,
member=self.members[0]['member'],
session_persistence="SOURCE_IP",
cleanup=[])
# Update pool to change name & disable session persistence
pool_data = {'name': 'newPool', 'lb_algorithm': 'LEAST_CONNECTIONS',
'session_persistence': None}
self.pool = self.octavia_admin_pools_client.\
update_octavia_pool(self.pool['id'], pool_data)['pool']
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
if CONF.network.backend == "nsxv":
self._verify_lbaas_on_edge(lb_id, listener=self.listener,
pool=self.pool,
member=self.members[0]['member'],
session_persistence="None",
cleanup=[])
# Update pool to enable session persistence
pool_data = {'name': 'newPool', 'lb_algorithm': 'LEAST_CONNECTIONS',
"session_persistence": {"type": "SOURCE_IP"}}
self.pool = self.octavia_admin_pools_client.\
update_octavia_pool(self.pool['id'], pool_data)['pool']
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
if CONF.network.backend == "nsxv":
self._verify_lbaas_on_edge(lb_id, listener=self.listener,
pool=self.pool,
member=self.members[0]['member'],
session_persistence="SOURCE_IP",
cleanup=[])
self.octavia_admin_pools_client.\
delete_octavia_pool(self.pool['id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.octavia_admin_listener_client.\
delete_octavia_listener(self.listener['id'])
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
self.octavia_admin_client.delete_octavia_load_balancer(lb_id)
self.octavia_admin_client.\
wait_for_load_balancer_status(lb_id, is_delete_op=True)
lbs = self.octavia_admin_client.\
list_octavia_load_balancers()['loadbalancers']
lb_names = [lb['name'] for lb in lbs]
self.assertNotIn(lb_name, lb_names)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a34')
def test_verify_rbac_network_octavia_lb_admin(self):
"""
Fetch the status of loadbalancer resources which uses the
api of openstack loadbalancer status show <lb>
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
net_id = diction['network']['id']
port_id = self.cmgr_adm.ports_client.create_port(
network_id=net_id)['port']['id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.cmgr_adm.ports_client.delete_port, port_id)
self.rbac_client.create_rbac_policy(action="access_as_shared",
object_type="network",
object_id=net_id,
target_tenant="admin")
self.create_project_octavia(protocol_type="HTTPS", protocol_port="80",
lb_algorithm="ROUND_ROBIN",
vip_port_id=port_id, hm_type='PING',
timeout=self.hm_timeout,
max_retries=self.hm_max_retries,
delay=self.hm_delay, default_pool=True)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('d5ac8546-6867-4b7a-8544-3843a11b1a34')
def test_verify_octavia_lb_resource_status(self):
"""
Fetch the status of loadbalancer resources which uses the
api of openstack loadbalancer status show <lb>
"""
diction = self.deploy_octavia_topology()
self.start_web_servers(constants.HTTP_PORT)
net_id = diction['network']['id']
port_id = self.cmgr_adm.ports_client.create_port(
network_id=net_id)['port']['id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.cmgr_adm.ports_client.delete_port, port_id)
self.create_project_octavia(protocol_type="HTTPS", protocol_port="80",
lb_algorithm="ROUND_ROBIN",
vip_port_id=port_id, hm_type='PING',
timeout=self.hm_timeout,
max_retries=self.hm_max_retries,
delay=self.hm_delay, default_pool=True)
lb_id = self.loadbalancer['id']
noerr, status_dict = self.get_status_lb_resources(lb_id)
self.assertTrue(noerr, status_dict)