319 lines
15 KiB
Python
319 lines
15 KiB
Python
# Copyright 2019 VMware Inc
|
|
# All Rights Reserved
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
# not use this file except in compliance with the License. You may obtain
|
|
# a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
# License for the specific language governing permissions and limitations
|
|
# under the License.
|
|
import time
|
|
|
|
from tempest import config
|
|
from tempest.lib.common.utils import data_utils
|
|
|
|
from tempest.lib import decorators
|
|
from tempest.lib import exceptions
|
|
|
|
from vmware_nsx_tempest_plugin.common import constants
|
|
from vmware_nsx_tempest_plugin.lib import feature_manager
|
|
from vmware_nsx_tempest_plugin.services import nsxv3_client
|
|
|
|
CONF = config.CONF
|
|
|
|
|
|
class TestSRAntiAffinity(feature_manager.FeatureManager):

    """Test SR (service router) anti-affinity across NSX failure domains."""

    @classmethod
    def setup_clients(cls):
        """Set up admin/alt client managers and admin-scoped network clients.

        BUG FIX: the original assigned ``cls.cmgr_adm`` twice; the
        duplicate assignment is removed.
        """
        super(TestSRAntiAffinity, cls).setup_clients()
        cls.cmgr_adm = cls.get_client_manager('admin')
        cls.cmgr_alt = cls.get_client_manager('alt')
        # All network operations in this suite run with admin credentials.
        cls.routers_client = cls.cmgr_adm.routers_client
        cls.networks_client = cls.cmgr_adm.networks_client
        cls.subnets_client = cls.cmgr_adm.subnets_client
        cls.sec_rule_client = cls.cmgr_adm.security_group_rules_client
        cls.sec_client = cls.cmgr_adm.security_groups_client
|
|
|
|
@classmethod
|
|
def resource_setup(cls):
|
|
cls.nsx = nsxv3_client.NSXV3Client(CONF.nsxv3.nsx_manager,
|
|
CONF.nsxv3.nsx_user,
|
|
CONF.nsxv3.nsx_password)
|
|
tn = cls.nsx.get_transport_nodes()
|
|
edge_ips = CONF.nsx_edge.nsx_edge_ip
|
|
FD1 = CONF.nsx_edge.failure_domain1
|
|
FD2 = CONF.nsx_edge.failure_domain2
|
|
edge_tn = []
|
|
cls.FD1_Node = []
|
|
cls.FD2_Node = []
|
|
for ip in edge_ips:
|
|
for t1 in tn:
|
|
if t1['node_deployment_info']['resource_type'] ==\
|
|
'EdgeNode' and ip in\
|
|
t1['node_deployment_info']['ip_addresses']:
|
|
edge_tn.append(t1)
|
|
edge_cluster_members = cls.nsx.get_edge_cluster_members(
|
|
CONF.nsx_edge.edge_cluster_id)
|
|
for tn in edge_tn:
|
|
for edge in edge_cluster_members:
|
|
if tn['node_id'] == edge['transport_node_id']:
|
|
member_index = edge['member_index']
|
|
if tn['failure_domain_id'] == FD1:
|
|
temp = tn
|
|
temp['member_index'] = member_index
|
|
cls.FD1_Node.append(temp)
|
|
elif tn['failure_domain_id'] == FD2:
|
|
temp = tn
|
|
temp['member_index'] = member_index
|
|
cls.FD2_Node.append(temp)
|
|
|
|
def verify_ping_to_fip_from_ext_vm(self, server_details):
|
|
self.using_floating_ip_check_server_and_project_network_connectivity(
|
|
server_details)
|
|
|
|
def verify_ping_own_fip(self, server):
|
|
fip = server["floating_ips"][0]["floating_ip_address"]
|
|
client = self.verify_server_ssh(server, floating_ip=fip)
|
|
ping_cmd = "ping -c 1 %s " % fip
|
|
self.exec_cmd_on_server_using_fip(ping_cmd, ssh_client=client)
|
|
|
|
def check_fd_of_router(self, nsx_router):
|
|
router_deployed_edges = nsx_router['edge_cluster_member_indices']
|
|
passive_instance_edge = router_deployed_edges[0]
|
|
active_present_fd = -1
|
|
passive_present_fd = -1
|
|
for node in self.FD1_Node:
|
|
if passive_instance_edge == node['member_index']:
|
|
passive_present_fd = 1
|
|
break
|
|
if passive_present_fd == -1:
|
|
for node in self.FD2_Node:
|
|
if passive_instance_edge == node['member_index']:
|
|
passive_present_fd = 2
|
|
break
|
|
active_instance_edge = router_deployed_edges[1]
|
|
for node in self.FD1_Node:
|
|
if active_instance_edge == node['member_index']:
|
|
active_present_fd = 1
|
|
break
|
|
if active_present_fd == -1:
|
|
for node in self.FD2_Node:
|
|
if active_instance_edge == node['member_index']:
|
|
active_present_fd = 2
|
|
break
|
|
return {'passive_present_fd': passive_present_fd,
|
|
'active_present_fd': active_present_fd}
|
|
|
|
@decorators.idempotent_id('1207461e-02ff-9027-d449-21178ecb57d4')
|
|
def test_sr_anti_affinity_for_one_router(self):
|
|
"""
|
|
Create router, check it should get deployed
|
|
in failure domains edges.i Active instance in one
|
|
failure domain and Passive instance in one
|
|
failure domain.
|
|
"""
|
|
kwargs = {"admin_state_up": "True"}
|
|
router_state = self.create_topology_router('rtr1',
|
|
set_gateway=True,
|
|
**kwargs)
|
|
time.sleep(constants.NSX_BACKEND_TIME_INTERVAL)
|
|
nsx_router = self.nsx.get_logical_router(router_state['name'],
|
|
router_state['id'])
|
|
result = self.check_fd_of_router(nsx_router)
|
|
self.assertNotEqual(result['passive_present_fd'],
|
|
result['active_present_fd'])
|
|
|
|
@decorators.idempotent_id('1207450e-02ee-9027-c339-10067dbb57d4')
|
|
def test_sr_anti_affinity_for_multiple_router(self):
|
|
"""
|
|
Create multiple router, check it should get deployed
|
|
in failure domains edges.i Active instance in one
|
|
failure domain and Passive instance in one
|
|
failure domain.
|
|
"""
|
|
kwargs = {"admin_state_up": "True"}
|
|
router_state = self.create_topology_router('rtr1',
|
|
set_gateway=True,
|
|
**kwargs)
|
|
time.sleep(constants.NSX_BACKEND_TIME_INTERVAL)
|
|
nsx_router = self.nsx.get_logical_router(router_state['name'],
|
|
router_state['id'])
|
|
result = self.check_fd_of_router(nsx_router)
|
|
self.assertNotEqual(result['passive_present_fd'],
|
|
result['active_present_fd'])
|
|
previous_router_edge_placement = result
|
|
router_state1 = self.create_topology_router('rtr2',
|
|
set_gateway=True,
|
|
**kwargs)
|
|
time.sleep(constants.NSX_BACKEND_TIME_INTERVAL)
|
|
nsx_router1 = self.nsx.get_logical_router(router_state1['name'],
|
|
router_state1['id'])
|
|
result = self.check_fd_of_router(nsx_router1)
|
|
self.assertNotEqual(result['passive_present_fd'],
|
|
result['active_present_fd'])
|
|
self.assertNotIn(previous_router_edge_placement['passive_present_fd'],
|
|
result, 'Tier-1 deployed on same edge')
|
|
self.assertNotEqual(
|
|
previous_router_edge_placement['active_present_fd'],
|
|
result, 'Tier-1 deployed on same edge')
|
|
previous_router_edge_placement = result
|
|
router_state2 = self.create_topology_router('rtr3',
|
|
set_gateway=True,
|
|
**kwargs)
|
|
time.sleep(constants.NSX_BACKEND_TIME_INTERVAL)
|
|
nsx_router2 = self.nsx.get_logical_router(router_state2['name'],
|
|
router_state2['id'])
|
|
result = self.check_fd_of_router(nsx_router2)
|
|
self.assertNotEqual(result['passive_present_fd'],
|
|
result['active_present_fd'])
|
|
self.assertNotIn(previous_router_edge_placement['passive_present_fd'],
|
|
result, 'Tier-1 deployed on same edge')
|
|
self.assertNotEqual(
|
|
previous_router_edge_placement['active_present_fd'],
|
|
result, 'Tier-1 deployed on same edge')
|
|
|
|
    @decorators.idempotent_id('1207450d-02dd-9016-c328-10067daa46c3')
    def test_sr_anti_affinity_for_multiple_router_with_lb(self):
        """Create multiple routers with LBaaS and verify SR anti-affinity.

        Creates up to 10 routers, each with a subnet and a full LBaaS
        stack (load balancer, listener, pool, health monitor, member),
        and checks each router's active and passive SR instances land in
        different failure domains.  The 11th load-balancer creation is
        expected to fail with a ServerFault.
        """
        sec_rule_client = self.manager.security_group_rules_client
        sec_client = self.manager.security_groups_client
        kwargs = dict(security_group_rules_client=sec_rule_client,
                      security_groups_client=sec_client)
        self.sg = self.create_topology_security_group(**kwargs)
        # Open HTTP (80) and HTTPS (443) ingress so LB VIP traffic passes.
        lbaas_rules = [dict(direction='ingress', protocol='tcp',
                            port_range_min=constants.HTTP_PORT,
                            port_range_max=constants.HTTP_PORT, ),
                       dict(direction='ingress', protocol='tcp',
                            port_range_min=443, port_range_max=443, )]
        for rule in lbaas_rules:
            self.add_security_group_rule(self.sg, rule)
        for i in range(0, 11):
            # NOTE(review): kwargs holds security-group client handles;
            # passing them through to create_topology_router looks
            # unintended -- confirm feature_manager tolerates the extras.
            router_state_1 = self.create_topology_router(set_gateway=True,
                                                         **kwargs)
            network_lbaas_1 = self.create_topology_network("network_lbaas")
            subnet_lbaas = self.create_topology_subnet(
                "subnet_lbaas", network_lbaas_1,
                router_id=router_state_1["id"])
            time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT)
            if i == 10:
                # The 11th LB should exceed edge capacity and be rejected.
                self.assertRaises(exceptions.ServerFault,
                                  self.load_balancers_admin_client.
                                  create_load_balancer,
                                  name='test-lb',
                                  vip_subnet_id=subnet_lbaas['id'])
                break
            loadbalancer = self.load_balancers_admin_client.\
                create_load_balancer(name='test-lb',
                                     vip_subnet_id=subnet_lbaas['id']
                                     )['loadbalancer']
            self.addCleanup(
                self.load_balancers_admin_client.delete_load_balancer,
                loadbalancer['id'])
            # Wait for ACTIVE between each provisioning step; the LB
            # rejects configuration changes while PENDING_UPDATE.
            self.load_balancers_admin_client.\
                wait_for_load_balancer_status(loadbalancer['id'])
            listener = self.listeners_admin_client.create_listener(
                loadbalancer_id=loadbalancer['id'], protocol='HTTP',
                protocol_port='80', name='test_listener')['listener']
            self.addCleanup(
                self.listeners_admin_client.delete_listener,
                listener['id'])
            self.load_balancers_admin_client.\
                wait_for_load_balancer_status(loadbalancer['id'])
            pool = self.pools_admin_client.create_pool(
                listener_id=listener['id'],
                lb_algorithm='ROUND_ROBIN', protocol='HTTP')['pool']
            self.load_balancers_admin_client.\
                wait_for_load_balancer_status(loadbalancer['id'])
            self.addCleanup(
                self.pools_admin_client.delete_pool,
                pool['id'])
            healthmonitor = (
                self.health_monitors_admin_client.create_health_monitor(
                    pool_id=pool['id'], type='PING',
                    delay='5', max_retries='3',
                    timeout='5'))['healthmonitor']
            self.addCleanup(
                self.health_monitors_admin_client.delete_health_monitor,
                healthmonitor['id'])
            self.load_balancers_admin_client.\
                wait_for_load_balancer_status(loadbalancer['id'])
            member = self.members_admin_client.create_member(
                pool['id'], subnet_id=subnet_lbaas['id'],
                address="127.0.0.1",
                protocol_port=80)['member']
            self.addCleanup(
                self.members_admin_client.delete_member, pool['id'],
                member['id'])
            self.load_balancers_admin_client.\
                wait_for_load_balancer_status(loadbalancer['id'])
            # Verify SR anti-affinity for this router's backend instances.
            nsx_router1 = self.nsx.get_logical_router(router_state_1['name'],
                                                      router_state_1['id'])
            result = self.check_fd_of_router(nsx_router1)
            self.assertNotEqual(result['passive_present_fd'],
                                result['active_present_fd'])
|
|
|
|
@decorators.idempotent_id('1206349c-02cc-9005-c317-09956daa46c3')
|
|
def test_sr_anti_affinity_traffic(self):
|
|
"""
|
|
Create router, check it should get deployed
|
|
in failure domains edges.i Active instance in one
|
|
failure domain and Passive instance in one
|
|
failure domain.
|
|
Test East-West and North-South traffic
|
|
"""
|
|
rtr_name = data_utils.rand_name(name='tempest-router')
|
|
network_name = data_utils.rand_name(name='tempest-net')
|
|
subnet_name = data_utils.rand_name(name='tempest-subnet')
|
|
router_state = self.create_topology_router(
|
|
rtr_name, set_gateway=True,
|
|
routers_client=self.cmgr_adm.routers_client)
|
|
network_state = self.create_topology_network(
|
|
network_name, networks_client=self.cmgr_adm.networks_client)
|
|
self.create_topology_subnet(
|
|
subnet_name, network_state, router_id=router_state["id"],
|
|
routers_client=self.cmgr_adm.routers_client,
|
|
subnets_client=self.cmgr_adm.subnets_client)
|
|
time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT)
|
|
security_group = self._create_security_group(
|
|
security_group_rules_client=self.cmgr_adm.
|
|
security_group_rules_client,
|
|
security_groups_client=self.cmgr_adm.security_groups_client)
|
|
image_id = self.get_glance_image_id(["cirros", "esx"])
|
|
security_groups = [{'name': security_group['name']}]
|
|
self.create_topology_instance(
|
|
"state_vm_1", [network_state],
|
|
create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm,
|
|
security_groups=security_groups)
|
|
self.create_topology_instance(
|
|
"state_vm_2", [network_state],
|
|
create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm,
|
|
security_groups=security_groups)
|
|
nsx_router1 = self.nsx.get_logical_router(router_state['name'],
|
|
router_state['id'])
|
|
result = self.check_fd_of_router(nsx_router1)
|
|
self.assertNotEqual(result['passive_present_fd'],
|
|
result['active_present_fd'])
|
|
self.check_cross_network_connectivity(
|
|
network_state,
|
|
self.servers_details.get("state_vm_1").floating_ips[0],
|
|
self.servers_details.get("state_vm_1").server, should_connect=True)
|
|
self.check_cross_network_connectivity(
|
|
network_state,
|
|
self.servers_details.get("state_vm_2").floating_ips[0],
|
|
self.servers_details.get("state_vm_2").server, should_connect=True)
|