From 7b33dfb2a8c2477171d5bd6c08b5a5f2f69831b4 Mon Sep 17 00:00:00 2001 From: Shubham Kadam Date: Wed, 6 Jan 2021 09:02:07 +0000 Subject: [PATCH] Allowed CIDR feature P1 case automation Change-Id: I7ba04f57eb38cc198fdc636d64b476e1b6c065a7 --- .../lib/appliance_manager.py | 122 ++++- .../lib/feature_manager.py | 8 +- .../lib/traffic_manager.py | 10 +- .../scenario/test_allowed_addres_cidr.py | 474 +++++++++++++----- 4 files changed, 464 insertions(+), 150 deletions(-) diff --git a/vmware_nsx_tempest_plugin/lib/appliance_manager.py b/vmware_nsx_tempest_plugin/lib/appliance_manager.py index 7d4b6fa..c75cb9c 100644 --- a/vmware_nsx_tempest_plugin/lib/appliance_manager.py +++ b/vmware_nsx_tempest_plugin/lib/appliance_manager.py @@ -198,7 +198,8 @@ class ApplianceManager(manager.NetworkScenarioTest): def create_topology_network( self, network_name, networks_client=None, - tenant_id=None, net_name_enhance=True, **kwargs): + tenant_id=None, net_name_enhance=True, + clean_up=True, **kwargs): if not networks_client: networks_client = self.networks_client if not tenant_id: @@ -219,8 +220,9 @@ class ApplianceManager(manager.NetworkScenarioTest): name=name, tenant_id=tenant_id, **kwargs) network = result['network'] self.assertEqual(network['name'], name) - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - networks_client.delete_network, network['id']) + if clean_up: + self.addCleanup(test_utils.call_and_ignore_notfound_exc, + networks_client.delete_network, network['id']) self.topology_networks[network_name] = network return network @@ -275,7 +277,7 @@ class ApplianceManager(manager.NetworkScenarioTest): def create_topology_subnet( self, subnet_name, network, routers_client=None, subnets_client=None, router_id=None, ip_version=4, cidr=None, - mask_bits=None, **kwargs): + mask_bits=None, clean_up=True, **kwargs): subnet_name_ = constants.APPLIANCE_NAME_STARTS_WITH + subnet_name if not subnets_client: subnets_client = self.subnets_client @@ -327,18 +329,20 @@ class ApplianceManager(manager.NetworkScenarioTest): self.assertIsNotNone(result, 'Unable to allocate tenant network') subnet = result['subnet'] self.assertEqual(subnet['cidr'], str_cidr) - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - subnets_client.delete_subnet, subnet['id']) + if clean_up: + self.addCleanup(test_utils.call_and_ignore_notfound_exc, + subnets_client.delete_subnet, subnet['id']) self.topology_subnets[subnet_name] = subnet if router_id: if not routers_client: routers_client = self.routers_client routers_client.add_router_interface( router_id, subnet_id=subnet["id"]) - self.addCleanup( - test_utils.call_and_ignore_notfound_exc, - routers_client.remove_router_interface, router_id, - subnet_id=subnet["id"]) + if clean_up: + self.addCleanup( + test_utils.call_and_ignore_notfound_exc, + routers_client.remove_router_interface, router_id, + subnet_id=subnet["id"]) return subnet def create_topology_security_provider_group( @@ -663,6 +667,74 @@ class ApplianceManager(manager.NetworkScenarioTest): subnetpool_id) return body + def _create_ipv6_subnet(self, network, cidr, ipv6_ra_mode=None, + ipv6_address_mode=None, + router_id=None, slaac=False, enable_dhcp=False): + """ + Create ipv6 subnet. 
+        With slaac and dhcp-stateless mode
+        """
+        subnet_client = self.cmgr_adm.subnets_client
+        subnet_name = network['name'] + 'sub'
+        if slaac:
+            subnet = self.create_topology_subnet(
+                subnet_name, network,
+                subnets_client=subnet_client,
+                ip_version=6, ipv6_ra_mode='slaac',
+                ipv6_address_mode='slaac',
+                cidr=cidr, router_id=router_id)
+        else:
+            subnet = self.create_topology_subnet(
+                subnet_name, network,
+                subnets_client=subnet_client,
+                ipv6_address_mode=ipv6_address_mode,
+                ipv6_ra_mode=ipv6_ra_mode,
+                ip_version=6, enable_dhcp=True,
+                cidr=cidr, router_id=router_id)
+        return subnet
+
+    def create_topo_single_dhcpv6_network(
+            self, namestart, ipv6_address_mode,
+            ipv6_ra_mode, create_instance=True,
+            set_gateway=True, slaac=False,
+            ipv6cidr=None, cidr=None,
+            security_groups=None, **kwargs):
+        """
+        Create a topology where one IPv6 logical switch is
+        connected via a tier-1 router.
+        """
+        rtr_name = data_utils.rand_name(name='tempest-router')
+        network_name = data_utils.rand_name(name='tempest-net')
+        subnet_name = data_utils.rand_name(name='tempest-subnet')
+        router_state = self.create_topology_router(rtr_name,
+                                                   set_gateway=set_gateway,
+                                                   **kwargs)
+        network_state = self.create_topology_network(network_name)
+        subnet_state = self.create_topology_subnet(
+            subnet_name, network_state,
+            router_id=router_state["id"], cidr=cidr)
+        subnet_v6 = self._create_ipv6_subnet(
+            network_state, cidr=ipv6cidr, slaac=slaac,
+            ipv6_address_mode=ipv6_address_mode,
+            ipv6_ra_mode=ipv6_ra_mode,
+            router_id=router_state["id"])
+        if create_instance:
+            image_id = self.get_glance_image_id(["cirros", "esx"])
+            self.create_topology_instance(
+                "state_vm_1", [network_state],
+                create_floating_ip=True, image_id=image_id,
+                clients=self.cmgr_adm,
+                security_groups=security_groups)
+            self.create_topology_instance(
+                "state_vm_2", [network_state],
+                create_floating_ip=True, image_id=image_id,
+                clients=self.cmgr_adm,
+                security_groups=security_groups)
+        topology_dict = dict(router_state=router_state,
+                             network_state=network_state,
+                             subnet_state=subnet_state,
+                             subnet_v6=subnet_v6)
+        return topology_dict
+
     def _create_topo_single_network(self, namestart, create_instance=True,
                                     set_gateway=True, cidr=None,
                                     clean_up=True, **kwargs):
@@ -670,26 +742,32 @@ class ApplianceManager(manager.NetworkScenarioTest):
         Create Topo where 1 logical switches which is
         connected via tier-1 router.
""" - rtr_name = data_utils.rand_name(name='tempest-uni-router') - network_name = data_utils.rand_name(name='tempest-uni-net') - subnet_name = data_utils.rand_name(name='tempest-uni-subnet') - router_state = self.create_topology_router(rtr_name, - set_gateway=set_gateway, - clean_up=clean_up, - **kwargs) - network_state = self.create_topology_network(network_name, - clean_up=clean_up) + rtr_name = data_utils.rand_name(name=namestart) + network_name = data_utils.rand_name(name=namestart) + subnet_name = data_utils.rand_name(name=namestart) + router_state = self.create_topology_router( + rtr_name, set_gateway=set_gateway, + clean_up=clean_up, + routers_client=self.cmgr_adm.routers_client, + **kwargs) + network_state = self.create_topology_network( + network_name, clean_up=clean_up, + networks_client=self.cmgr_adm.networks_client) subnet_state = self.create_topology_subnet( subnet_name, network_state, router_id=router_state["id"], - cidr=cidr, clean_up=clean_up) + cidr=cidr, clean_up=clean_up, + routers_client=self.cmgr_adm.routers_client, + subnets_client=self.cmgr_adm.subnets_client) if create_instance: image_id = self.get_glance_image_id(["cirros", "esx"]) self.create_topology_instance( "state_vm_1", [network_state], - create_floating_ip=True, image_id=image_id, clean_up=clean_up) + create_floating_ip=True, image_id=image_id, + clients=self.cmgr_adm, clean_up=clean_up) self.create_topology_instance( "state_vm_2", [network_state], - create_floating_ip=True, image_id=image_id, clean_up=clean_up) + create_floating_ip=True, image_id=image_id, + clients=self.cmgr_adm, clean_up=clean_up) topology_dict = dict(router_state=router_state, network_state=network_state, subnet_state=subnet_state) diff --git a/vmware_nsx_tempest_plugin/lib/feature_manager.py b/vmware_nsx_tempest_plugin/lib/feature_manager.py index 931b786..aaed44b 100644 --- a/vmware_nsx_tempest_plugin/lib/feature_manager.py +++ b/vmware_nsx_tempest_plugin/lib/feature_manager.py @@ -806,8 +806,12 @@ class FeatureManager(traffic_manager.IperfManager, i = 0 vip = self.vip_ip_address time.sleep(constants.WAIT_FOR_VIP) - self.do_http_request(vip=vip, send_counts=self.poke_counters, - no_resp=no_resp) + if HTTPS is None: + self.do_http_request(vip=vip, send_counts=self.poke_counters, + no_resp=no_resp) + else: + self.do_https_request(vip=vip, send_counts=self.poke_counters, + no_resp=no_resp) # ROUND_ROUBIN, so equal counts no_of_vms = len(self.http_cnt) if source_ip: diff --git a/vmware_nsx_tempest_plugin/lib/traffic_manager.py b/vmware_nsx_tempest_plugin/lib/traffic_manager.py index ec1f5a3..d5cd0c4 100644 --- a/vmware_nsx_tempest_plugin/lib/traffic_manager.py +++ b/vmware_nsx_tempest_plugin/lib/traffic_manager.py @@ -241,13 +241,17 @@ class TrafficManager(appliance_manager.ApplianceManager): # count_response counts the no of requests made for each members return self.http_cnt - def do_https_request(self, vip, start_path='', send_counts=None): + def do_https_request(self, vip, start_path='', send_counts=None, + no_resp=False): # http_cnt stores no of requests made for each members self.http_cnt = {} if not CONF.nsxv3.ens: + if no_resp: + send_counts = 2 for x in range(send_counts): - resp = self.query_webserver(vip, HTTPS=True) - self.count_response(resp) + resp = self.query_webserver(vip, HTTPS=True, no_resp=no_resp) + if not no_resp: + self.count_response(resp) else: for x in range(send_counts): self.http_cnt = self.query_ens(vip, HTTPS=True) diff --git a/vmware_nsx_tempest_plugin/tests/scenario/test_allowed_addres_cidr.py 
b/vmware_nsx_tempest_plugin/tests/scenario/test_allowed_addres_cidr.py index 556690d..432f5fc 100644 --- a/vmware_nsx_tempest_plugin/tests/scenario/test_allowed_addres_cidr.py +++ b/vmware_nsx_tempest_plugin/tests/scenario/test_allowed_addres_cidr.py @@ -19,7 +19,6 @@ import time from tempest.common.utils.linux import remote_client from tempest import config from tempest.lib.common.utils import data_utils -from tempest.lib.common.utils import test_utils from tempest.lib import decorators from tempest.lib import exceptions @@ -74,76 +73,6 @@ class TestAllowedAddresCidr(feature_manager.FeatureManager): CONF.nsxv3.nsx_user, CONF.nsxv3.nsx_password) - def _test_ping_from_external_network(self, fip_ip): - out = os.popen('ping -c 5 %s' % fip_ip).read().strip() - return out - - def _create_ipv6_subnet(self, network, cidr, ipv6_ra_mode=None, - ipv6_address_mode=None, - router_id=None, slaac=False, enable_dhcp=False): - subnet_client = self.cmgr_adm.subnets_client - subnet_name = network['name'] + 'sub' - if slaac: - subnet = self.create_topology_subnet( - subnet_name, network, - subnets_client=subnet_client, - ip_version=6, ipv6_ra_mode='slaac', - ipv6_address_mode='slaac', - cidr=cidr, router_id=router_id) - else: - subnet = self.create_topology_subnet( - subnet_name, network, - subnets_client=subnet_client, - ipv6_address_mode=ipv6_address_mode, - ipv6_ra_mode=ipv6_ra_mode, - ip_version=6, enable_dhcp=True, - cidr=cidr, router_id=router_id) - return subnet - - def create_topo_single_dhcpv6_network( - self, namestart, ipv6_address_mode, - ipv6_ra_mode, create_instance=True, - set_gateway=True, slaac=False, - ipv6cidr=None, cidr=None, - security_groups=None, **kwargs): - """ - Create Topo where 1 logical switches which is - connected via tier-1 router. - """ - rtr_name = data_utils.rand_name(name='tempest-router') - network_name = data_utils.rand_name(name='tempest-net') - subnet_name = data_utils.rand_name(name='tempest-subnet') - router_state = self.create_topology_router(rtr_name, - set_gateway=set_gateway, - **kwargs) - network_state = self.create_topology_network(network_name) - subnet_state = self.create_topology_subnet( - subnet_name, network_state, - router_id=router_state["id"], cidr=cidr) - subnet_v6 = self._create_ipv6_subnet( - network_state, cidr=ipv6cidr, slaac=slaac, - ipv6_address_mode=ipv6_address_mode, - ipv6_ra_mode=ipv6_ra_mode, - router_id=router_state["id"]) - time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT) - if create_instance: - image_id = self.get_glance_image_id(["cirros", "esx"]) - self.create_topology_instance( - "state_vm_1", [network_state], - create_floating_ip=True, image_id=image_id, - clients=self.cmgr_adm, - security_groups=security_groups) - self.create_topology_instance( - "state_vm_2", [network_state], - create_floating_ip=True, image_id=image_id, - clients=self.cmgr_adm, - security_groups=security_groups) - topology_dict = dict(router_state=router_state, - network_state=network_state, - subnet_state=subnet_state, - subnet_v6=subnet_v6) - return topology_dict - @decorators.idempotent_id('2317449c-14ca-1428-a428-09956daa46c3') def test_allowed_address_cidr_octavia_lb(self): """ @@ -151,22 +80,11 @@ class TestAllowedAddresCidr(feature_manager.FeatureManager): Verify order of NAT and Firewall. 
""" kwargs = {"enable_snat": True} - network_name = data_utils.rand_name(name='tempest-net') - subnet_name = data_utils.rand_name(name='tempest-subnet') - router_state = self.create_topology_router( - set_gateway=True, - routers_client=self.cmgr_adm.routers_client, **kwargs) - network_state = self.create_topology_network( - network_name, networks_client=self.cmgr_adm.networks_client) - subnet_state = self.create_topology_subnet( - subnet_name, network_state, - subnets_client=self.cmgr_adm.subnets_client) + topo_dict = self._create_topo_single_network( + 'tempest_allowed_cidr', create_instance=False, + set_gateway=True, **kwargs) + network_state = topo_dict['network_state'] time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT) - self.cmgr_adm.routers_client.add_router_interface( - router_state['id'], subnet_id=subnet_state["id"]) - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - self.cmgr_adm.routers_client.remove_router_interface, - router_state['id'], subnet_id=subnet_state["id"]) sec_rule_client = self.cmgr_adm.security_group_rules_client sec_client = self.cmgr_adm.security_groups_client kwargs = dict(tenant_id=network_state['tenant_id'], @@ -235,15 +153,12 @@ class TestAllowedAddresCidr(feature_manager.FeatureManager): """ kwargs = {"enable_snat": True} mode = "dhcpv6-stateless" - network_name = data_utils.rand_name(name='tempest-net') - subnet_name = data_utils.rand_name(name='tempest-subnet') - router_name = data_utils.rand_name(name='tempest-router') - network_state1 = self.create_topology_network( + network_name = "test-net" + network = self.create_topology_network( network_name, networks_client=self.cmgr_adm.networks_client) - time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT) sec_rule_client = self.cmgr_adm.security_group_rules_client sec_client = self.cmgr_adm.security_groups_client - kwargs = dict(tenant_id=network_state1['tenant_id'], + kwargs = dict(tenant_id=network['tenant_id'], security_group_rules_client=sec_rule_client, security_groups_client=sec_client) self.sg = self._create_security_group( @@ -262,7 +177,7 @@ class TestAllowedAddresCidr(feature_manager.FeatureManager): rule, ruleclient=self.cmgr_adm.security_group_rules_client, secclient=self.cmgr_adm.security_groups_client, - tenant_id=network_state1['tenant_id']) + tenant_id=network['tenant_id']) kwargs = {"admin_state_up": "True"} security_groups = [{'name': self.sg['name']}] topology_dict = self.create_topo_single_dhcpv6_network( @@ -272,36 +187,21 @@ class TestAllowedAddresCidr(feature_manager.FeatureManager): ipv6cidr="1300::/64", cidr="13.0.0.0/24", security_groups=security_groups, **kwargs) subnetipv6_state = topology_dict['subnet_v6'] - router_state1 = self.create_topology_router( - router_name, set_gateway=True, - routers_client=self.cmgr_adm.routers_client, **kwargs) - self.create_topology_subnet( - subnet_name, network_state1, - subnets_client=self.cmgr_adm.subnets_client, - cidr="35.0.0.0/24", router_id=router_state1["id"]) - mode = "dhcpv6-stateless" - self._create_ipv6_subnet( - network_state1, cidr="3500::/64", slaac=False, - ipv6_address_mode=mode, - ipv6_ra_mode=mode, - router_id=router_state1["id"]) - network_name = data_utils.rand_name(name='tempest-net') - subnet_name = data_utils.rand_name(name='tempest-subnet') - router_name = data_utils.rand_name(name='tempest-router') - network_state2 = self.create_topology_network( - network_name, networks_client=self.cmgr_adm.networks_client) - router_state2 = self.create_topology_router( - router_name, set_gateway=True, - 
routers_client=self.cmgr_adm.routers_client, **kwargs)
-        self.create_topology_subnet(
-            subnet_name, network_state2,
-            subnets_client=self.cmgr_adm.subnets_client,
-            cidr="46.0.0.0/24", router_id=router_state2["id"])
-        self._create_ipv6_subnet(
-            network_state2, cidr="4600::/64", slaac=False,
-            ipv6_address_mode=mode,
-            ipv6_ra_mode=mode,
-            router_id=router_state2["id"])
+        topology_dict = self.create_topo_single_dhcpv6_network(
+            "allowed_address_cidr", create_instance=False,
+            set_gateway=True,
+            ipv6_ra_mode=mode, ipv6_address_mode=mode,
+            ipv6cidr="3500::/64", cidr="35.0.0.0/24",
+            security_groups=security_groups, **kwargs)
+        network_state1 = topology_dict['network_state']
+        topology_dict = self.create_topo_single_dhcpv6_network(
+            "allowed_address_cidr",
+            set_gateway=True,
+            ipv6_ra_mode=mode, ipv6_address_mode=mode,
+            ipv6cidr="4600::/64", cidr="46.0.0.0/24",
+            security_groups=security_groups,
+            create_instance=False, **kwargs)
+        network_state2 = topology_dict['network_state']
         time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT)
         self.start_web_servers(constants.HTTP_PORT)
         lb_cist = self.create_project_octavia(
@@ -381,3 +281,331 @@
             data = ssh_client.exec_command(command)
             self.assertIn(data, ['state_vm_1', 'state_vm_2'])
         self.delete_octavia_lb_resources(lb_cist['lb_id'])
+
+    @decorators.idempotent_id('2317449c-14ca-1428-a428-09956daa46c3')
+    def test_allowed_address_0_0_0_0_cidr_octavia_lb(self):
+        """
+        Update allowed_cidrs on an Octavia HTTP listener and verify that
+        matching CIDRs pass traffic to the VIP while others are blocked.
+        """
+        kwargs = {"enable_snat": True}
+        topo_dict = self._create_topo_single_network(
+            'tempest_allowed_cidr', create_instance=False,
+            set_gateway=True, **kwargs)
+        network_state = topo_dict['network_state']
+        time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT)
+        sec_rule_client = self.cmgr_adm.security_group_rules_client
+        sec_client = self.cmgr_adm.security_groups_client
+        kwargs = dict(tenant_id=network_state['tenant_id'],
+                      security_group_rules_client=sec_rule_client,
+                      security_groups_client=sec_client)
+        self.sg = self._create_security_group(
+            security_group_rules_client=self.cmgr_adm.
+ security_group_rules_client, + security_groups_client=self.cmgr_adm.security_groups_client) + lbaas_rules = [dict(direction='ingress', protocol='tcp', + port_range_min=constants.HTTP_PORT, + port_range_max=constants.HTTP_PORT, ), + dict(direction='ingress', protocol='tcp', + port_range_min=443, port_range_max=443, ), + dict(direction='ingress', protocol='tcp', + port_range_min=constants.HTTP_PORT, + port_range_max=constants.HTTP_PORT, + ethertype='IPv6'), + dict(direction='ingress', protocol='tcp', + port_range_min=443, port_range_max=443, + ethertype='IPv6')] + for rule in lbaas_rules: + self.add_security_group_rule( + self.sg, + rule, + ruleclient=self.cmgr_adm.security_group_rules_client, + secclient=self.cmgr_adm.security_groups_client, + tenant_id=network_state['tenant_id']) + security_groups = [{'name': self.sg['name']}] + image_id = self.get_glance_image_id(["cirros", "esx"]) + self.create_topology_instance( + "state_vm_1", [network_state], + create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm, + security_groups=security_groups) + self.create_topology_instance( + "state_vm_2", [network_state], + create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm, + security_groups=security_groups) + self.start_web_servers(constants.HTTP_PORT) + lb_cist = self.create_project_octavia( + protocol_type="HTTP", protocol_port="80", + lb_algorithm="LEAST_CONNECTIONS", + vip_net_id=network_state['id']) + allowed_cidrs = {'allowed_cidrs': ['0.0.0.0/0']} + self.octavia_admin_listener_client.\ + update_octavia_listener(lb_cist['listener_id'], + listener_data=allowed_cidrs)['listener'] + time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT) + self.check_project_lbaas(no_resp=False) + allowed_cidrs = {'allowed_cidrs': ['0.0.0.0/24']} + self.octavia_admin_listener_client.\ + update_octavia_listener(lb_cist['listener_id'], + listener_data=allowed_cidrs)['listener'] + time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT) + self.check_project_lbaas(no_resp=True) + out = os.popen("sudo ifconfig eth1 | grep 'inet'" + "| awk {'print$2'} | " + "cut -f2 -d':'").read().strip() + allowed_cidrs = {'allowed_cidrs': ['%s/24' % out]} + self.octavia_admin_listener_client.\ + update_octavia_listener(lb_cist['listener_id'], + listener_data=allowed_cidrs)['listener'] + time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT) + self.check_project_lbaas(no_resp=False) + self.delete_octavia_lb_resources(lb_cist['lb_id']) + + @decorators.idempotent_id('2317449c-14ca-1428-a428-09956daa46c3') + def test_allowed_address_cidr_barbican_octavia_lb(self): + """ + Create NAT and Firewall rules on router. + Verify order of NAT and Firewall. + """ + kwargs = {"enable_snat": True} + topo_dict = self._create_topo_single_network( + 'tempest_allowed_cidr', create_instance=False, + set_gateway=True, **kwargs) + network_state = topo_dict['network_state'] + time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT) + sec_rule_client = self.cmgr_adm.security_group_rules_client + sec_client = self.cmgr_adm.security_groups_client + kwargs = dict(tenant_id=network_state['tenant_id'], + security_group_rules_client=sec_rule_client, + security_groups_client=sec_client) + self.sg = self._create_security_group( + security_group_rules_client=self.cmgr_adm. 
+ security_group_rules_client, + security_groups_client=self.cmgr_adm.security_groups_client) + lbaas_rules = [dict(direction='ingress', protocol='tcp'), + dict(direction='egress', protocol='tcp'), + dict(direction='ingress', protocol='tcp', + ethertype='IPv6'), + dict(direction='egress', protocol='tcp', + ethertype='IPv6')] + for rule in lbaas_rules: + self.add_security_group_rule( + self.sg, + rule, + ruleclient=self.cmgr_adm.security_group_rules_client, + secclient=self.cmgr_adm.security_groups_client, + tenant_id=network_state['tenant_id']) + security_groups = [{'name': self.sg['name']}] + image_id = self.get_glance_image_id(["cirros", "esx"]) + self.create_topology_instance( + "state_vm_1", [network_state], + create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm, + security_groups=security_groups) + self.create_topology_instance( + "state_vm_2", [network_state], + create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm, + security_groups=security_groups) + self.start_web_servers(constants.HTTP_PORT) + barbican_secrets = self.create_barbican_secret_conatainer( + constants.CERT_FILE, constants.KEY_FILE) + barbican_container = barbican_secrets['secret_container'] + lb_cist = self.create_project_octavia( + protocol_type="TERMINATED_HTTPS", + protocol_port="443", + lb_algorithm="LEAST_CONNECTIONS", + vip_net_id=network_state['id'], + hm_type='HTTP', + member_count=2, + weight=5, + pool_protocol='HTTP', + pool_port='80', + barbican_container=barbican_container, + count=0, barbican=True, + delay=self.hm_delay, + max_retries=self.hm_max_retries, + timeout=self.hm_timeout) + allowed_cidrs = {'allowed_cidrs': ['0.0.0.0/0']} + self.octavia_admin_listener_client.\ + update_octavia_listener(lb_cist['listener_id'], + listener_data=allowed_cidrs)['listener'] + time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT) + self.check_project_lbaas(no_resp=False, HTTPS=True) + allowed_cidrs = {'allowed_cidrs': ['0.0.0.0/24']} + self.octavia_admin_listener_client.\ + update_octavia_listener(lb_cist['listener_id'], + listener_data=allowed_cidrs)['listener'] + time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT) + self.check_project_lbaas(no_resp=True, HTTPS=True) + out = os.popen("sudo ifconfig eth1 | grep 'inet'" + "| awk {'print$2'} | " + "cut -f2 -d':'").read().strip() + allowed_cidrs = {'allowed_cidrs': ['%s/24' % out]} + self.octavia_admin_listener_client.\ + update_octavia_listener(lb_cist['listener_id'], + listener_data=allowed_cidrs)['listener'] + time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT) + self.check_project_lbaas(no_resp=False, HTTPS=True) + allowed_cidrs = {'allowed_cidrs': []} + self.octavia_admin_listener_client.\ + update_octavia_listener(lb_cist['listener_id'], + listener_data=allowed_cidrs)['listener'] + time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT) + self.check_project_lbaas(no_resp=False, HTTPS=True) + self.delete_octavia_lb_resources(lb_cist['lb_id']) + + @decorators.idempotent_id('2317449c-14ca-1428-a428-09956daa46c3') + def test_allowed_address_ipv6_cidr_barbican_octavia_lb(self): + """ + Create NAT and Firewall rules on router. + Verify order of NAT and Firewall. 
+ """ + kwargs = {"enable_snat": True} + mode = "dhcpv6-stateless" + network_name = data_utils.rand_name(name='tempest-net') + network_state1 = self.create_topology_network( + network_name, networks_client=self.cmgr_adm.networks_client) + time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT) + sec_rule_client = self.cmgr_adm.security_group_rules_client + sec_client = self.cmgr_adm.security_groups_client + kwargs = dict(tenant_id=network_state1['tenant_id'], + security_group_rules_client=sec_rule_client, + security_groups_client=sec_client) + self.sg = self._create_security_group( + security_group_rules_client=self.cmgr_adm. + security_group_rules_client, + security_groups_client=self.cmgr_adm.security_groups_client) + lbaas_rules = [dict(direction='ingress', protocol='tcp'), + dict(direction='ingress', protocol='tcp', + ethertype='IPv6'), + dict(direction='egress', protocol='tcp', + ethertype='IPv6'), + dict(direction='egress', protocol='tcp')] + for rule in lbaas_rules: + self.add_security_group_rule( + self.sg, + rule, + ruleclient=self.cmgr_adm.security_group_rules_client, + secclient=self.cmgr_adm.security_groups_client, + tenant_id=network_state1['tenant_id']) + kwargs = {"admin_state_up": "True"} + security_groups = [{'name': self.sg['name']}] + topology_dict = self.create_topo_single_dhcpv6_network( + "allowed_address_cidr", create_instance=True, + set_gateway=True, + ipv6_ra_mode=mode, ipv6_address_mode=mode, + ipv6cidr="1300::/64", cidr="13.0.0.0/24", + security_groups=security_groups, **kwargs) + subnetipv6_state = topology_dict['subnet_v6'] + topology_dict = self.create_topo_single_dhcpv6_network( + "allowed_address_cidr", create_instance=False, + set_gateway=True, + ipv6_ra_mode=mode, ipv6_address_mode=mode, + ipv6cidr="3500::/64", cidr="35.0.0.0/24", + security_groups=security_groups, **kwargs) + network_state1 = topology_dict['network_state'] + topology_dict = self.create_topo_single_dhcpv6_network( + "allowed_address_cidr", + set_gateway=True, + ipv6_ra_mode=mode, ipv6_address_mode=mode, + ipv6cidr="4600::/64", cidr="46.0.0.0/24", + security_groups=security_groups, + create_instance=False, **kwargs) + network_state2 = topology_dict['network_state'] + time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT) + self.start_web_servers(constants.HTTP_PORT) + barbican_secrets = self.create_barbican_secret_conatainer( + constants.CERT_FILE, constants.KEY_FILE) + barbican_container = barbican_secrets['secret_container'] + lb_cist = self.create_project_octavia( + protocol_type="TERMINATED_HTTPS", + protocol_port="443", + lb_algorithm="LEAST_CONNECTIONS", + vip_subnet_id=subnetipv6_state['id'], + hm_type='HTTP', + member_count=2, + weight=5, + pool_protocol='HTTP', + pool_port='80', + barbican_container=barbican_container, + count=0, barbican=True, + delay=self.hm_delay, + max_retries=self.hm_max_retries, + timeout=self.hm_timeout, + create_fip=False, ipv6=True) + security_groups = [{'name': self.sg['name']}] + image_id = self.get_glance_image_id(["cirros", "esx"]) + server3 = self.create_topology_instance( + "state_vm_3", [network_state1], + create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm, + security_groups=security_groups) + server4 = self.create_topology_instance( + "state_vm_4", [network_state2], + create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm, + security_groups=security_groups) + allowed_cidrs = {'allowed_cidrs': ['3500::/64', '4600::/64']} + self.octavia_admin_listener_client.\ + update_octavia_listener(lb_cist['listener_id'], + 
listener_data=allowed_cidrs)['listener'] + fip1 = server3["floating_ips"][0]["floating_ip_address"] + fip2 = server4["floating_ips"][0]["floating_ip_address"] + ssh_client = remote_client.RemoteClient( + fip1, 'cirros', 'gocubsgo') + command = "curl -k https://[%s]" % \ + lb_cist['loadbalancer']['vip_address'] + for i in range(4): + data = ssh_client.exec_command(command) + self.assertIn(data, ['state_vm_1', 'state_vm_2']) + time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL) + ssh_client = remote_client.RemoteClient( + fip2, 'cirros', 'gocubsgo') + command = "curl -k https://[%s]" % \ + lb_cist['loadbalancer']['vip_address'] + for i in range(4): + data = ssh_client.exec_command(command) + self.assertIn(data, ['state_vm_1', 'state_vm_2']) + allowed_cidrs = {'allowed_cidrs': ["6700::/64"]} + self.octavia_admin_listener_client.\ + update_octavia_listener(lb_cist['listener_id'], + listener_data=allowed_cidrs)['listener'] + fip1 = server3["floating_ips"][0]["floating_ip_address"] + fip2 = server4["floating_ips"][0]["floating_ip_address"] + ssh_client = remote_client.RemoteClient( + fip1, 'cirros', 'gocubsgo') + command = "curl -k https://[%s]" % \ + lb_cist['loadbalancer']['vip_address'] + time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL) + for i in range(4): + self.assertRaises( + exceptions.SSHExecCommandFailed, + ssh_client.exec_command, command) + time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL) + ssh_client = remote_client.RemoteClient( + fip2, 'cirros', 'gocubsgo') + command = "curl -k https://[%s]" % \ + lb_cist['loadbalancer']['vip_address'] + for i in range(4): + self.assertRaises( + exceptions.SSHExecCommandFailed, + ssh_client.exec_command, command) + allowed_cidrs = {'allowed_cidrs': []} + self.octavia_admin_listener_client.\ + update_octavia_listener(lb_cist['listener_id'], + listener_data=allowed_cidrs)['listener'] + fip1 = server3["floating_ips"][0]["floating_ip_address"] + fip2 = server4["floating_ips"][0]["floating_ip_address"] + ssh_client = remote_client.RemoteClient( + fip1, 'cirros', 'gocubsgo') + command = "curl -k https://[%s]" % \ + lb_cist['loadbalancer']['vip_address'] + time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL) + for i in range(4): + data = ssh_client.exec_command(command) + self.assertIn(data, ['state_vm_1', 'state_vm_2']) + time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL) + ssh_client = remote_client.RemoteClient( + fip2, 'cirros', 'gocubsgo') + command = "curl -k https://[%s]" % \ + lb_cist['loadbalancer']['vip_address'] + for i in range(4): + data = ssh_client.exec_command(command) + self.assertIn(data, ['state_vm_1', 'state_vm_2']) + self.delete_octavia_lb_resources(lb_cist['lb_id'])
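
For reviewers who want to poke the same allowed_cidrs gate by hand, outside of tempest, the short sketch below shows roughly equivalent calls made with openstacksdk. It is illustrative only and not part of this patch: the cloud name, listener UUID and VIP address are placeholders, the 60-second sleeps only loosely mirror constants.NSX_NETWORK_REALISE_TIMEOUT, and 203.0.113.0/24 is a documentation range chosen so the client never matches it.

    # Minimal sketch, assuming a clouds.yaml entry named 'devstack-admin',
    # an existing Octavia HTTP listener, and a reachable VIP.
    import time
    import urllib.request

    import openstack

    conn = openstack.connect(cloud='devstack-admin')     # assumed cloud name
    listener_id = 'REPLACE-WITH-LISTENER-UUID'           # placeholder
    vip = 'REPLACE-WITH-VIP-ADDRESS'                     # placeholder

    # Open the listener to every source: the VIP is expected to answer.
    conn.load_balancer.update_listener(listener_id, allowed_cidrs=['0.0.0.0/0'])
    time.sleep(60)                                       # wait for backend realization
    print(urllib.request.urlopen('http://%s/' % vip, timeout=10).status)

    # Pin the listener to a CIDR that does not contain this client: the same
    # request should now fail (connection refused or timeout).
    conn.load_balancer.update_listener(listener_id,
                                       allowed_cidrs=['203.0.113.0/24'])
    time.sleep(60)
    try:
        urllib.request.urlopen('http://%s/' % vip, timeout=10)
    except Exception as exc:
        print('blocked as expected: %s' % exc)

    # Clearing the list removes the restriction again.
    conn.load_balancer.update_listener(listener_id, allowed_cidrs=[])

This mirrors the pattern the new test cases repeat with the plugin's own helpers (update_octavia_listener, then check_project_lbaas after the NSX realization wait).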