From abf6fc9460c94168e7c379f839feb3c1283d50ee Mon Sep 17 00:00:00 2001
From: dkumbhar
Date: Thu, 26 Nov 2020 09:20:42 +0000
Subject: [PATCH] Octavia API cases & other Octavia cases automation

Add an Octavia quota client and API tests covering the Octavia quota
endpoints (list default quotas, show/update a project's quotas, and
reset them), plus new load balancer scenario tests: cascade delete of
an LB whose pool is not attached to a listener, and verification on
the NSX edge that an established TCP session through one LB survives
deletion of a second LB. While at it, open an any/accept firewall rule
when enabling SSH on an edge, and convert assertTrue/assertFalse
membership checks to assertIn/assertNotIn across the plugin.

Change-Id: I6e31b7b2179c656c30a46a6f9edf8e73a8596a30
---
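Notes below the fold (ignored by git-am), with a minimal usage sketch of
the new OctaviaQuotaClient. This assumes the client is wired up through
FeatureManager.setup_clients() as done in this patch; `project_id` is a
placeholder. The write keys mirror the ones test_lb_quotas.py itself
uses ('loadbalancer', 'healthmonitor'), while reads return
'load_balancer' and 'health_monitor'.

    # Hypothetical reviewer sketch, not part of the change itself:
    quota_client = self.octavia_admin_quota_client
    quotas = quota_client.list_project_quota(project_id)['quota']
    quota_client.set_project_quota(
        project_id, quota={'loadbalancer': quotas['load_balancer'] + 5})
    quota_client.delete_project_quota(project_id)  # reset to defaults
    defaults = quota_client.list_default_quota()['quota']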
 .../lib/feature_manager.py                    |   9 +-
 .../services/nsxv_client.py                   |  10 +-
 .../services/openstack_network_clients.py     |  24 +++
 .../tests/nsxv/api/test_lb_quotas.py          | 100 ++++++++++++
 .../tests/nsxv/api/test_provider_sec_group.py |   3 +-
 .../scenario/test_lbaas_round_robin_ops.py    |   3 +-
 .../scenario/test_octavia_loadbalancers.py    | 154 +++++++++++++++++-
 .../nsxv3/api/test_native_dhcp_negative.py    |   6 +-
 .../tests/nsxv3/api/test_nsx_mac_learning.py  |   2 +-
 .../nsxv3/scenario/test_router_nonat_ops.py   |  46 +++---
 .../test_multiple_allow_address_pair.py       |   3 +-
 11 files changed, 321 insertions(+), 39 deletions(-)
 create mode 100644 vmware_nsx_tempest_plugin/tests/nsxv/api/test_lb_quotas.py

diff --git a/vmware_nsx_tempest_plugin/lib/feature_manager.py b/vmware_nsx_tempest_plugin/lib/feature_manager.py
index 6e476be..66f76ae 100644
--- a/vmware_nsx_tempest_plugin/lib/feature_manager.py
+++ b/vmware_nsx_tempest_plugin/lib/feature_manager.py
@@ -175,7 +175,12 @@ class FeatureManager(traffic_manager.IperfManager,
                                              net_client.region,
                                              net_client.endpoint_type,
                                              **_params)
-
+        cls.octavia_admin_quota_client = openstack_network_clients.\
+            OctaviaQuotaClient(net_client.auth_provider,
+                               net_client.service,
+                               net_client.region,
+                               net_client.endpoint_type,
+                               **_params)
         net_client.service = 'dns'
         cls.zones_v2_client = openstack_network_clients.ZonesV2Client(
             net_client.auth_provider,
@@ -825,7 +830,7 @@ class FeatureManager(traffic_manager.IperfManager,
             else:
                 break
         else:
-            self.assertFalse('server_lbaas_0' in self.http_cnt, True)
+            self.assertNotIn('server_lbaas_0', self.http_cnt)
 
     def count_response(self, response):
         response = response.decode('utf-8')
diff --git a/vmware_nsx_tempest_plugin/services/nsxv_client.py b/vmware_nsx_tempest_plugin/services/nsxv_client.py
index 61d4345..993baac 100644
--- a/vmware_nsx_tempest_plugin/services/nsxv_client.py
+++ b/vmware_nsx_tempest_plugin/services/nsxv_client.py
@@ -146,6 +146,12 @@ class VSMClient(object):
         endpoint = "/edges/%s/clisettings" % (edge_detail['id'])
         self.__set_endpoint(endpoint)
         self.put(body=payload)
+        rules = [{'name': 'anyRule', "ruleType": "user", "enabled": 'true',
+                  "action": "accept"}]
+        rule_payload = {"firewallRules": rules}
+        endpoint = "/edges/%s/firewall/config/rules" % (edge_detail['id'])
+        self.__set_endpoint(endpoint)
+        self.post(body=rule_payload)
 
     def get_all_vdn_scopes(self):
         """Retrieve existing network scopes"""
@@ -487,9 +493,9 @@ class VSMClient(object):
         if hmonitor:
             hms_vsm = [hm['id'] for hm in lbaas_config['monitor']]
             if 'hm' in cleanup:
-                self.assertFalse(hmonitor['id'] in hms_vsm)
+                self.assertNotIn(hmonitor['id'], hms_vsm)
             else:
-                self.assertTrue(hmonitor['id'] in hms_vsm)
+                self.assertIn(hmonitor['id'], hms_vsm)
         if pool:
             pool_vsm = \
                 [(p['name'], p['algorithm']) for p in lbaas_config['pool']]
diff --git a/vmware_nsx_tempest_plugin/services/openstack_network_clients.py b/vmware_nsx_tempest_plugin/services/openstack_network_clients.py
index 8beb839..537b925 100644
--- a/vmware_nsx_tempest_plugin/services/openstack_network_clients.py
+++ b/vmware_nsx_tempest_plugin/services/openstack_network_clients.py
@@ -630,6 +630,30 @@ class ContainerClient(rest_client.RestClient):
         return body
 
 
+class OctaviaQuotaClient(base.BaseNetworkClient):
+    """
+    The client takes care of:
+    listing a project's quota,
+    setting/unsetting a project's quota,
+    listing the default quota.
+    """
+    def list_project_quota(self, project_id):
+        uri = '/lbaas/quotas/' + project_id
+        return self.list_resources(uri)
+
+    def set_project_quota(self, project_id, **kwargs):
+        uri = '/lbaas/quotas/' + project_id
+        return self.update_resource(uri, kwargs, expect_response_code=202)
+
+    def delete_project_quota(self, project_id):
+        uri = '/lbaas/quotas/' + project_id
+        return self.delete_resource(uri, expect_response_code=202)
+
+    def list_default_quota(self):
+        uri = '/lbaas/quotas/defaults'
+        return self.list_resources(uri)
+
+
 class OctaviaLB_Client(base.BaseNetworkClient):
     """
     The Client takes care of
diff --git a/vmware_nsx_tempest_plugin/tests/nsxv/api/test_lb_quotas.py b/vmware_nsx_tempest_plugin/tests/nsxv/api/test_lb_quotas.py
new file mode 100644
index 0000000..3411619
--- /dev/null
+++ b/vmware_nsx_tempest_plugin/tests/nsxv/api/test_lb_quotas.py
@@ -0,0 +1,100 @@
+# Copyright 2019 VMware Inc
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from tempest import config
+from tempest.lib import decorators
+from tempest import test
+
+from vmware_nsx_tempest_plugin.common import constants
+from vmware_nsx_tempest_plugin.lib import feature_manager
+
+LOG = constants.log.getLogger(__name__)
+CONF = config.CONF
+
+
+class OctaviaQuota(feature_manager.FeatureManager):
+
+    """Test the Octavia load balancer quota API.
+
+    It covers listing the default quotas, showing and updating a
+    project's quotas, and resetting them back to the defaults.
+
+    A quota value of -1 means the resource is unlimited.
+    """
+    @classmethod
+    def setup_clients(cls):
+        super(OctaviaQuota, cls).setup_clients()
+        # Admin and alt credential managers used by the quota tests.
+        cls.cmgr_adm = cls.get_client_manager('admin')
+        cls.cmgr_alt = cls.get_client_manager('alt')
+
+    @classmethod
+    def skip_checks(cls):
+        super(OctaviaQuota, cls).skip_checks()
+        cfg = CONF.network
+        if not test.is_extension_enabled('lbaasv2', 'network'):
+            msg = 'lbaasv2 extension is not enabled.'
+            raise cls.skipException(msg)
+        if not (cfg.project_networks_reachable or cfg.public_network_id):
+            msg = ('Either project_networks_reachable must be "true", or '
+                   'public_network_id must be defined.')
+            raise cls.skipException(msg)
+
+    @classmethod
+    def setup_credentials(cls):
+        # Ask framework to not create network resources for these tests.
+        cls.set_network_resources()
+        super(OctaviaQuota, cls).setup_credentials()
+
+    @decorators.idempotent_id('c3ac8546-6867-4b7a-8544-3843a11b1a34')
+    def test_list_default_quota(self):
+        quotas = self.octavia_admin_quota_client.\
+            list_default_quota()['quota']
+        for quota in quotas:
+            msg = quota + '\'s default quota is not -1'
+            self.assertEqual(-1, quotas[quota], msg)
+
+    @decorators.idempotent_id('c4ac8546-6867-4b7a-8544-3843a11b1a34')
+    def test_show_quota_and_set_quota_and_show_quota_to_verify(self):
+        project_id = CONF.auth.admin_tenant_id
+        quotas = self.octavia_admin_quota_client.\
+            list_project_quota(project_id)['quota']
+        # Update the project's lb quotas, increasing each by 5
+        kwargs = {}
+        kwargs['quota'] = {
+            "loadbalancer": quotas['load_balancer'] + 5,
+            "listener": quotas['listener'] + 5,
+            "member": quotas['member'] + 5,
+            "pool": quotas['pool'] + 5,
+            "healthmonitor": quotas['health_monitor'] + 5,
+        }
+        self.octavia_admin_quota_client.\
+            set_project_quota(project_id, **kwargs)
+        updated_quota = self.octavia_admin_quota_client.\
+            list_project_quota(project_id)['quota']
+        self.assertEqual(updated_quota['load_balancer'],
+                         quotas['load_balancer'] + 5)
+        self.assertEqual(updated_quota['listener'],
+                         quotas['listener'] + 5)
+
+    @decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a34')
+    def test_delete_quota_and_show_quota_to_verify(self):
+        project_id = CONF.auth.admin_tenant_id
+        self.octavia_admin_quota_client.\
+            delete_project_quota(project_id)
+        updated_quota = self.octavia_admin_quota_client.\
+            list_project_quota(project_id)['quota']
+        for quota in updated_quota:
+            msg = quota + '\'s quota is not reset to -1'
+            self.assertEqual(-1, updated_quota[quota], msg)
diff --git a/vmware_nsx_tempest_plugin/tests/nsxv/api/test_provider_sec_group.py b/vmware_nsx_tempest_plugin/tests/nsxv/api/test_provider_sec_group.py
index 5319c41..8cff07a 100644
--- a/vmware_nsx_tempest_plugin/tests/nsxv/api/test_provider_sec_group.py
+++ b/vmware_nsx_tempest_plugin/tests/nsxv/api/test_provider_sec_group.py
@@ -272,7 +272,8 @@ class ProviderSecGroup(base.BaseAdminNetworkTest):
         show_sec_group = sg_client.show_security_group(sg_id)
         rules_list = show_sec_group['security_group']['security_group_rules']
         rules_id_list = [rule['id'] for rule in rules_list]
-        self.assertTrue(sg_rule1_id in rules_id_list)
+        sg_id_present = sg_rule1_id in rules_id_list
+        self.assertTrue(sg_id_present)
 
     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('edd94f8c-53b7-4286-9350-0ddc0af3213b')
diff --git a/vmware_nsx_tempest_plugin/tests/nsxv/scenario/test_lbaas_round_robin_ops.py b/vmware_nsx_tempest_plugin/tests/nsxv/scenario/test_lbaas_round_robin_ops.py
index 9192111..9252bad 100644
--- a/vmware_nsx_tempest_plugin/tests/nsxv/scenario/test_lbaas_round_robin_ops.py
+++ b/vmware_nsx_tempest_plugin/tests/nsxv/scenario/test_lbaas_round_robin_ops.py
@@ -414,7 +414,8 @@ class LBaasRoundRobinBaseTest(dmgr.TopoDeployScenarioManager):
                                              'Admin!23Admin')
         cmd = 'show flowtable topN 20 '
         output = ssh_client.exec_command(cmd)
-        self.assertTrue(vip in output)
+        vip_in_output = vip in output
+        self.assertTrue(vip_in_output)
 
 
 class TestLBaasRoundRobinOps(LBaasRoundRobinBaseTest):
diff --git a/vmware_nsx_tempest_plugin/tests/nsxv/scenario/test_octavia_loadbalancers.py b/vmware_nsx_tempest_plugin/tests/nsxv/scenario/test_octavia_loadbalancers.py
index e558454..620932e 100644
--- a/vmware_nsx_tempest_plugin/tests/nsxv/scenario/test_octavia_loadbalancers.py
+++ b/vmware_nsx_tempest_plugin/tests/nsxv/scenario/test_octavia_loadbalancers.py
@@ -14,6 +14,7 @@
 # under the License.
 import re
 
+from tempest.common.utils.linux import remote_client
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
@@ -102,6 +103,29 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         LOG.debug("tearDown lbaas exiting...")
         super(OctaviaRoundRobin, self).tearDown()
 
+    def start_netcat_session(self, client_ip, server_ip, protocol_port=1212):
+        private_key = self.keypair['private_key']
+        ssh_client1 = self.get_remote_client(server_ip,
+                                             private_key=private_key)
+        vip = self.loadbalancer['vip_address']
+        cmd = ('nc -l -p %s &' % (protocol_port))
+        ssh_client1.exec_command(cmd)
+        ssh_client2 = self.get_remote_client(client_ip,
+                                             private_key=private_key)
+        cmd = ('nc %s %s &' % (vip, protocol_port))
+        ssh_client2.exec_command(cmd)
+
+    def verify_session_edge(self, vip, router):
+        router_info = \
+            router['external_gateway_info']['external_fixed_ips']
+        router_ip = \
+            router_info[0]['ip_address']
+        ssh_client = remote_client.RemoteClient(router_ip, 'admin',
+                                                'Admin!23Admin')
+        cmd = 'show flowtable topN 20 '
+        output = ssh_client.exec_command(cmd)
+        self.assertIn(vip, output)
+
     def _assign_floating_ip_to_vip(self):
         vip_port = self.loadbalancer['vip_port_id']
         sg_id = self.sg['id']
@@ -417,17 +441,20 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
                         self.cmgr_adm.routers_client.remove_router_interface,
                         router_lbaas['router']['id'],
                         subnet_id=subnet_lbaas2['subnet']['id'])
+        self.keypair = self.create_keypair(self.cmgr_adm.keypairs_client)
         for instance in range(0, no_of_servers):
             self.create_topology_instance(
                 "server_lbaas1_%s" % instance, [network_lbaas_1],
                 security_groups=[{'name': self.sg['name']}],
-                image_id=image_id, clients=self.cmgr_adm)
+                image_id=image_id, clients=self.cmgr_adm,
+                keypair=self.keypair)
         self.topology_servers1 = self.topology_servers
         for instance in range(0, no_of_servers):
             self.create_topology_instance(
                 "server_lbaas2_%s" % instance, [network_lbaas_2],
                 security_groups=[{'name': self.sg['name']}],
-                image_id=image_id, clients=self.cmgr_adm)
+                image_id=image_id, clients=self.cmgr_adm,
+                keypair=self.keypair)
         self.topology_servers2 = self.topology_servers
         return dict(router=router_lbaas, subnet1=subnet_lbaas1,
                     subnet2=subnet_lbaas2, network1=network_lbaas_1,
@@ -1218,7 +1245,7 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         lbs = self.octavia_admin_client.\
             list_octavia_load_balancers()['loadbalancers']
         lb_names = [lb['name'] for lb in lbs]
-        self.assertFalse(lb_name in lb_names)
+        self.assertNotIn(lb_name, lb_names)
 
     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
@@ -1241,7 +1268,39 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         lbs = self.octavia_admin_client.\
             list_octavia_load_balancers()['loadbalancers']
         lb_names = [lb['name'] for lb in lbs]
-        self.assertFalse(self.loadbalancer['name'] in lb_names)
+        self.assertNotIn(self.loadbalancer['name'], lb_names)
+
+    @decorators.attr(type='nsxv')
+    @decorators.idempotent_id('ca5d4368-6770-4a7b-8704-3845b11b1b66')
+    def test_delete_lb_with_cascade_when_pool_without_attaching_listener(self):
+        diction = self.deploy_octavia_topology()
+        subnet_id = diction['subnet']['subnet']['id']
+        self.create_project_octavia(protocol_type="TCP", protocol_port="1212",
+                                    lb_algorithm="LEAST_CONNECTIONS",
+                                    hm_type='PING', vip_subnet_id=subnet_id,
+                                    default_pool=True,
+                                    timeout=self.hm_timeout, clean_up=False,
+                                    max_retries=self.hm_max_retries,
+                                    delay=self.hm_delay)
+        lb_id = self.loadbalancer['id']
+        self.octavia_admin_pools_client.\
+            delete_octavia_pool(self.pool['pool']['id'])
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.pool = self.octavia_admin_pools_client.\
+            create_octavia_pool(loadbalancer_id=lb_id,
+                                lb_algorithm='ROUND_ROBIN',
+                                protocol='TCP',
+                                name='NewPool',
+                                session_persistence=None)['pool']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.octavia_admin_client.\
+            delete_octavia_load_balancer_with_cascade(lb_id)
+        self.octavia_admin_client.\
+            wait_for_load_balancer_status(lb_id, is_delete_op=True)
+        lbs = self.octavia_admin_client.\
+            list_octavia_load_balancers()['loadbalancers']
+        lb_names = [lb['name'] for lb in lbs]
+        self.assertNotIn(self.loadbalancer['name'], lb_names)
 
     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
@@ -1294,7 +1353,7 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         lbs = self.octavia_admin_client.\
             list_octavia_load_balancers()['loadbalancers']
         lb_names = [lb['name'] for lb in lbs]
-        self.assertFalse(self.loadbalancer['name'] in lb_names)
+        self.assertNotIn(self.loadbalancer['name'], lb_names)
 
     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
@@ -1342,7 +1401,7 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         lbs = self.octavia_admin_client.\
             list_octavia_load_balancers()['loadbalancers']
         lb_names = [lb['name'] for lb in lbs]
-        self.assertFalse(self.loadbalancer['name'] in lb_names)
+        self.assertNotIn(self.loadbalancer['name'], lb_names)
 
     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('ca6c4368-6770-4a7b-8704-3844b11b1b61')
@@ -1402,7 +1461,7 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         lbs = self.octavia_admin_client.\
             list_octavia_load_balancers()['loadbalancers']
         lb_names = [lb['name'] for lb in lbs]
-        self.assertFalse(lb_name in lb_names)
+        self.assertNotIn(lb_name, lb_names)
 
     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
@@ -1518,7 +1577,84 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         lbs = self.octavia_admin_client.\
             list_octavia_load_balancers()['loadbalancers']
         lb_names = [lb['name'] for lb in lbs]
-        self.assertFalse(self.loadbalancer['name'] in lb_names)
+        self.assertNotIn(self.loadbalancer['name'], lb_names)
+
+    @decorators.attr(type='nsxv')
+    @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11c1b66')
+    def test_delete_second_lb_verify_tcp_connection_with_first_lb(self):
+        diction = \
+            self.deploy_octavia_topology_with_multi_network(no_of_servers=1)
+        subnet_id = diction['subnet1']['subnet']['id']
+        router = diction['router']['router']
+        # Create first lb
+        lb_name = data_utils.rand_name(self.namestart)
+        self.loadbalancer = self.octavia_admin_client.\
+            create_octavia_load_balancer(name=lb_name,
+                                         vip_subnet_id=subnet_id,
+                                         admin_state_up=True)['loadbalancer']
+        lb_id = self.loadbalancer['id']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.listener = self.octavia_admin_listener_client.\
+            create_octavia_listener(loadbalancer_id=lb_id,
+                                    protocol='TCP',
+                                    protocol_port='1212',
+                                    allowed_cidrs=None,
+                                    name=lb_name)['listener']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.pool = self.octavia_admin_pools_client.\
+            create_octavia_pool(listener_id=self.listener['id'],
+                                lb_algorithm='ROUND_ROBIN',
+                                protocol='TCP',
+                                name=lb_name,
+                                session_persistence=None)['pool']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        pool_id = self.pool['id']
+        for s in self.topology_servers.keys():
+            fip_data = self.servers_details[s].floating_ips[0]
+            fixed_ip_address = fip_data['fixed_ip_address']
+            servers = list(self.topology_servers.keys())
+            # Adding one VM as member
+            if servers.index(s) == 0:
+                self.octavia_admin_members_client.\
+                    create_octavia_member(pool_id,
+                                          subnet_id=subnet_id,
+                                          address=fixed_ip_address,
+                                          protocol_port='1212',
+                                          weight=1)['member']
+                server1_fip = fip_data['floating_ip_address']
+            else:
+                client1_fip = fip_data['floating_ip_address']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.healthmonitor = self.octavia_hm_client.\
+            create_octavia_hm(pool_id=pool_id, type='PING', delay=2,
+                              timeout=10, max_retries=5,
+                              name=lb_name)['healthmonitor']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        # LB creation is done, start netcat session and verify on edge
+        self.start_netcat_session(client1_fip, server1_fip,
+                                  protocol_port=1212)
+        vip = self.loadbalancer['vip_address']
+        self.vsm.enable_ssh_on_edge(router['name'], router['id'])
+        self.verify_session_edge(vip, router)
+        lb2_name = data_utils.rand_name(self.namestart)
+        self.loadbalancer_2 = self.octavia_admin_client.\
+            create_octavia_load_balancer(name=lb2_name,
+                                         vip_subnet_id=subnet_id,
+                                         admin_state_up=True)['loadbalancer']
+        lb2_id = self.loadbalancer_2['id']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb2_id)
+        self.octavia_admin_client.\
+            delete_octavia_load_balancer_with_cascade(lb2_id)
+        self.verify_session_edge(vip, router)
+        self.octavia_admin_client.\
+            delete_octavia_load_balancer_with_cascade(lb_id)
+        self.octavia_admin_client.\
+            wait_for_load_balancer_status(lb_id,
+                                          is_delete_op=True)
+        lbs = self.octavia_admin_client.\
+            list_octavia_load_balancers()['loadbalancers']
+        lb_names = [lb['name'] for lb in lbs]
+        self.assertNotIn(self.loadbalancer['name'], lb_names)
 
     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
@@ -1681,7 +1817,7 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         lbs = self.octavia_admin_client.\
             list_octavia_load_balancers()['loadbalancers']
         lb_names = [lb['name'] for lb in lbs]
-        self.assertFalse(lb_name in lb_names)
+        self.assertNotIn(lb_name, lb_names)
 
     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a34')
diff --git a/vmware_nsx_tempest_plugin/tests/nsxv3/api/test_native_dhcp_negative.py b/vmware_nsx_tempest_plugin/tests/nsxv3/api/test_native_dhcp_negative.py
index e7c5028..1a928cf 100644
--- a/vmware_nsx_tempest_plugin/tests/nsxv3/api/test_native_dhcp_negative.py
+++ b/vmware_nsx_tempest_plugin/tests/nsxv3/api/test_native_dhcp_negative.py
@@ -72,7 +72,7 @@ class NSXv3NativeDHCPNegative(base.BaseNetworkTest):
         nsx_network = self.nsxp.get_logical_switch(network['name'],
                                                    network['id'])
         self.assertIsNotNone(nsx_network)
-        self.assertEqual('subnets' in nsx_network, False)
+        self.assertNotIn('subnets', nsx_network)
 
     @decorators.attr(type='nsxv3')
     @decorators.attr(type=['negative'])
@@ -96,7 +96,7 @@ class NSXv3NativeDHCPNegative(base.BaseNetworkTest):
         nsx_network = self.nsxp.get_logical_switch(network['name'],
                                                    network['id'])
         self.assertIsNotNone(nsx_network)
-        self.assertEqual('subnets' in nsx_network, False)
+        self.assertNotIn('subnets', nsx_network)
 
     @decorators.attr(type='nsxv3')
     @decorators.attr(type=['negative'])
@@ -120,7 +120,7 @@ class NSXv3NativeDHCPNegative(base.BaseNetworkTest):
         nsx_network = self.nsxp.get_logical_switch(network['name'],
                                                    network['id'])
         self.assertIsNotNone(nsx_network)
-        self.assertEqual('subnets' in nsx_network, True)
+        self.assertIn('subnets', nsx_network)
         # Update subnet to disable DHCP
         self.subnets_client.update_subnet(subnet['id'], enable_dhcp=False)
         if CONF.network.backend != 'nsxp':
diff --git a/vmware_nsx_tempest_plugin/tests/nsxv3/api/test_nsx_mac_learning.py b/vmware_nsx_tempest_plugin/tests/nsxv3/api/test_nsx_mac_learning.py
index 33fb5ae..229c68f 100644
--- a/vmware_nsx_tempest_plugin/tests/nsxv3/api/test_nsx_mac_learning.py
+++ b/vmware_nsx_tempest_plugin/tests/nsxv3/api/test_nsx_mac_learning.py
@@ -104,7 +104,7 @@ class NSXv3MacLearningTest(base.BaseNetworkTest):
         body = self.ports_client.list_ports()
         ports_list = body['ports']
         if len(ports_list) > 0:
-            self.assertFalse(port_id in [n['id'] for n in ports_list],
+            self.assertNotIn(port_id, [n['id'] for n in ports_list],
                              "Deleted port still present in ports list")
 
     def _conv_switch_prof_to_dict(self, switch_profiles):
diff --git a/vmware_nsx_tempest_plugin/tests/nsxv3/scenario/test_router_nonat_ops.py b/vmware_nsx_tempest_plugin/tests/nsxv3/scenario/test_router_nonat_ops.py
index 944d829..efad422 100644
--- a/vmware_nsx_tempest_plugin/tests/nsxv3/scenario/test_router_nonat_ops.py
+++ b/vmware_nsx_tempest_plugin/tests/nsxv3/scenario/test_router_nonat_ops.py
@@ -301,12 +301,14 @@ class TestRouterNoNATOps(manager.NetworkScenarioTest):
         else:
             self.assertTrue(len(nat_rules) == 3)
         if CONF.network.backend == 'nsxp':
+            route_adv_type_nat = 'TIER1_NAT' in nsx_router_policy[
+                'route_advertisement_types']
             self.assertTrue(
-                'TIER1_NAT' in nsx_router_policy['route_advertisement_types'],
+                route_adv_type_nat,
                 nat_msg)
-            self.assertFalse(
-                'TIER1_CONNECTED' in nsx_router_policy[
-                    'route_advertisement_types'], adv_msg)
+            route_adv_type_conn = 'TIER1_CONNECTED' in nsx_router_policy[
+                'route_advertisement_types']
+            self.assertFalse(route_adv_type_conn, adv_msg)
         else:
             self.assertTrue(router_adv['advertise_nat_routes'], nat_msg)
             self.assertFalse(
@@ -349,12 +351,14 @@ class TestRouterNoNATOps(manager.NetworkScenarioTest):
         else:
             self.assertTrue(len(nat_rules) == 0)
         if CONF.network.backend == 'nsxp':
-            self.assertFalse(
-                'TIER1_NAT' in nsx_router_policy[
-                    'route_advertisement_types'], nat_msg)
+            route_adv_type_nat = 'TIER1_NAT' in nsx_router_policy[
+                'route_advertisement_types']
+            self.assertFalse(route_adv_type_nat, nat_msg)
+            route_adv_type_conn = 'TIER1_CONNECTED' in nsx_router_policy[
+                'route_advertisement_types']
             self.assertTrue(
-                'TIER1_CONNECTED' in nsx_router_policy[
-                    'route_advertisement_types'], adv_msg)
+                route_adv_type_conn,
+                adv_msg)
         else:
             self.assertFalse(router_adv['advertise_nat_routes'], nat_msg)
             self.assertTrue(
@@ -390,12 +394,14 @@ class TestRouterNoNATOps(manager.NetworkScenarioTest):
         else:
             self.assertTrue(len(nat_rules) == 0)
         if CONF.network.backend == 'nsxp':
-            self.assertFalse(
-                'TIER1_NAT' in nsx_router_policy[
-                    'route_advertisement_types'], nat_msg)
+            route_adv_type_nat = 'TIER1_NAT' in nsx_router_policy[
+                'route_advertisement_types']
+            self.assertFalse(route_adv_type_nat, nat_msg)
+            route_adv_type_conn = 'TIER1_CONNECTED' in nsx_router_policy[
+                'route_advertisement_types']
             self.assertTrue(
-                'TIER1_CONNECTED' in nsx_router_policy[
-                    'route_advertisement_types'], adv_msg)
+                route_adv_type_conn,
+                adv_msg)
         else:
             self.assertFalse(router_adv['advertise_nat_routes'], nat_msg)
             self.assertTrue(
@@ -432,12 +438,14 @@ class TestRouterNoNATOps(manager.NetworkScenarioTest):
         else:
             self.assertTrue(len(nat_rules) == 3)
         if CONF.network.backend == 'nsxp':
+            route_adv_type_nat = 'TIER1_NAT' in nsx_router_policy[
+                'route_advertisement_types']
             self.assertTrue(
-                'TIER1_NAT' in nsx_router_policy[
-                    'route_advertisement_types'], nat_msg)
-            self.assertFalse(
-                'TIER1_CONNECTED' in nsx_router_policy[
-                    'route_advertisement_types'], adv_msg)
+                route_adv_type_nat,
+                nat_msg)
+            route_adv_type_conn = 'TIER1_CONNECTED' in nsx_router_policy[
+                'route_advertisement_types']
+            self.assertFalse(route_adv_type_conn, adv_msg)
         else:
             self.assertTrue(router_adv['advertise_nat_routes'], nat_msg)
             self.assertFalse(
diff --git a/vmware_nsx_tempest_plugin/tests/scenario/test_multiple_allow_address_pair.py b/vmware_nsx_tempest_plugin/tests/scenario/test_multiple_allow_address_pair.py
index 87121f9..7757a65 100644
--- a/vmware_nsx_tempest_plugin/tests/scenario/test_multiple_allow_address_pair.py
+++ b/vmware_nsx_tempest_plugin/tests/scenario/test_multiple_allow_address_pair.py
@@ -89,7 +89,8 @@ class MultipleAllowAddress(feature_manager.FeatureManager):
         ipList = [ip_mac['ip_address'] for ip_mac in port_details]
 
         msg = ipaddressORcidr + 'is not in allowed address'
-        self.assertTrue(ipaddressORcidr in ipList, msg)
+        ip_or_cidr_in_list = ipaddressORcidr in ipList
+        self.assertTrue(ip_or_cidr_in_list, msg)
 
     @decorators.attr(type='negative')
     @decorators.attr(type='nsxv')