Lbaasv2 to Octavia migration and FW/NAT automation
- Added barbican, lb vip automation with octavia - Added all cases of FW/NAT Change-Id: Ib4d24396fd183abb12fe0c4da6d050c9f96b5dee
This commit is contained in:
parent
e11ed7ebd3
commit
286e68eec5
|
@ -1564,7 +1564,8 @@ class FeatureManager(traffic_manager.IperfManager,
|
||||||
compare_type=None,
|
compare_type=None,
|
||||||
type=None, value=None, barbican=False,
|
type=None, value=None, barbican=False,
|
||||||
barbican_container=None, invert=None,
|
barbican_container=None, invert=None,
|
||||||
qos_policy_id=None):
|
qos_policy_id=None, external=None,
|
||||||
|
external_subnet=None, create_fip=None):
|
||||||
count = 0
|
count = 0
|
||||||
lb_name = None
|
lb_name = None
|
||||||
if persistence:
|
if persistence:
|
||||||
|
@ -1576,14 +1577,22 @@ class FeatureManager(traffic_manager.IperfManager,
|
||||||
|
|
||||||
if lb_id is None:
|
if lb_id is None:
|
||||||
lb_name = data_utils.rand_name(self.namestart)
|
lb_name = data_utils.rand_name(self.namestart)
|
||||||
self.loadbalancer = self.\
|
if external:
|
||||||
octavia_admin_client.\
|
self.loadbalancer = self.\
|
||||||
create_octavia_load_balancer(name=lb_name,
|
octavia_admin_client.\
|
||||||
vip_subnet_id=vip_subnet_id,
|
create_octavia_load_balancer(
|
||||||
vip_network_id=vip_net_id,
|
name=lb_name,
|
||||||
vip_port_id=vip_port_id,
|
vip_subnet_id=external_subnet)['loadbalancer']
|
||||||
vip_qos_policy_id=qos_policy_id
|
else:
|
||||||
)['loadbalancer']
|
self.loadbalancer = self.\
|
||||||
|
octavia_admin_client.\
|
||||||
|
create_octavia_load_balancer(
|
||||||
|
name=lb_name,
|
||||||
|
vip_subnet_id=vip_subnet_id,
|
||||||
|
vip_network_id=vip_net_id,
|
||||||
|
vip_port_id=vip_port_id,
|
||||||
|
vip_qos_policy_id=qos_policy_id,
|
||||||
|
admin_state_up=True)['loadbalancer']
|
||||||
lb_id = self.loadbalancer['id']
|
lb_id = self.loadbalancer['id']
|
||||||
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
|
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
|
||||||
if barbican:
|
if barbican:
|
||||||
|
@ -1643,27 +1652,42 @@ class FeatureManager(traffic_manager.IperfManager,
|
||||||
self.members = []
|
self.members = []
|
||||||
for server_name in self.topology_servers.keys():
|
for server_name in self.topology_servers.keys():
|
||||||
if count < member_count:
|
if count < member_count:
|
||||||
if fip_disassociate is True:
|
if create_fip:
|
||||||
x = str(self.topology_servers[server_name]['addresses'].
|
fip_data = self.servers_details[server_name].\
|
||||||
|
floating_ips[0]
|
||||||
|
if fip_disassociate is True:
|
||||||
|
x = str(
|
||||||
|
self.topology_servers[server_name]['addresses'].
|
||||||
keys()).split("'")[1]
|
keys()).split("'")[1]
|
||||||
m = self.topology_servers[server_name]
|
m = self.topology_servers[server_name]
|
||||||
fixed_ip_address = m['addresses'][x][0]['addr']
|
fixed_ip_address = m['addresses'][x][0]['addr']
|
||||||
|
else:
|
||||||
|
fixed_ip_address = fip_data['fixed_ip_address']
|
||||||
|
if fip_disassociate is None:
|
||||||
|
kwargs = dict(port_id=None)
|
||||||
|
self.cmgr_adm.floating_ips_client.\
|
||||||
|
update_floatingip(fip_data['id'],
|
||||||
|
**kwargs)['floatingip']
|
||||||
else:
|
else:
|
||||||
f_d = self.servers_details[server_name]
|
net_name = self.servers_details[server_name][2][0]['name']
|
||||||
fip_data = f_d.floating_ips[0]
|
fixed_ip_address = self.servers_details[
|
||||||
fixed_ip_address = fip_data['fixed_ip_address']
|
server_name][0]['addresses'][net_name][0]['addr']
|
||||||
if fip_disassociate is None:
|
|
||||||
kwargs = dict(port_id=None)
|
|
||||||
self.cmgr_adm.floating_ips_client.\
|
|
||||||
update_floatingip(fip_data['id'],
|
|
||||||
**kwargs)['floatingip']
|
|
||||||
if weight:
|
if weight:
|
||||||
weight += count
|
weight += count
|
||||||
member = self.octavia_admin_members_client.\
|
if barbican:
|
||||||
create_octavia_member(pool_id, subnet_id=vip_subnet_id,
|
member = self.octavia_admin_members_client.\
|
||||||
address=fixed_ip_address,
|
create_octavia_member(pool_id,
|
||||||
protocol_port=protocol_port,
|
subnet_id=vip_subnet_id,
|
||||||
weight=weight)
|
address=fixed_ip_address,
|
||||||
|
protocol_port=pool_port,
|
||||||
|
weight=weight)
|
||||||
|
else:
|
||||||
|
member = self.octavia_admin_members_client.\
|
||||||
|
create_octavia_member(pool_id,
|
||||||
|
subnet_id=vip_subnet_id,
|
||||||
|
address=fixed_ip_address,
|
||||||
|
protocol_port=protocol_port,
|
||||||
|
weight=weight)
|
||||||
else:
|
else:
|
||||||
member = self.octavia_admin_members_client.\
|
member = self.octavia_admin_members_client.\
|
||||||
create_octavia_member(pool_id, subnet_id=vip_subnet_id,
|
create_octavia_member(pool_id, subnet_id=vip_subnet_id,
|
||||||
|
@ -1679,23 +1703,28 @@ class FeatureManager(traffic_manager.IperfManager,
|
||||||
update_port(self.loadbalancer['vip_port_id'],
|
update_port(self.loadbalancer['vip_port_id'],
|
||||||
security_groups=[self.sg['id']])
|
security_groups=[self.sg['id']])
|
||||||
# create floatingip for public network
|
# create floatingip for public network
|
||||||
if fip_disassociate is True:
|
if create_fip is False:
|
||||||
self.vip_ip_address = self.loadbalancer['vip_port_id']
|
self.vip_ip_address = self.loadbalancer['vip_port_id']
|
||||||
|
vip_fip = self.vip_ip_address
|
||||||
else:
|
else:
|
||||||
self.cmgr_adm.ports_client.update_port(
|
self.cmgr_adm.ports_client.update_port(
|
||||||
self.loadbalancer['vip_port_id'],
|
self.loadbalancer['vip_port_id'],
|
||||||
security_groups=[
|
security_groups=[
|
||||||
self.sg['id']])
|
self.sg['id']])
|
||||||
vip_fip = self.create_floatingip(
|
if not external:
|
||||||
self.loadbalancer,
|
vip_fip = self.create_floatingip(
|
||||||
client=self.cmgr_adm.floating_ips_client,
|
self.loadbalancer,
|
||||||
port_id=self.loadbalancer['vip_port_id'])
|
client=self.cmgr_adm.floating_ips_client,
|
||||||
self.vip_ip_address = vip_fip['floating_ip_address']
|
port_id=self.loadbalancer['vip_port_id'])
|
||||||
|
self.vip_ip_address = vip_fip['floating_ip_address']
|
||||||
return dict(lb_id=lb_id,
|
return dict(lb_id=lb_id,
|
||||||
vip_address=self.vip_ip_address,
|
vip_address=self.vip_ip_address,
|
||||||
pool_id=pool_id,
|
pool_id=pool_id,
|
||||||
members=self.members,
|
members=self.members,
|
||||||
listener_id=self.listener['id'])
|
listener_id=self.listener['id'],
|
||||||
|
vip_fip=vip_fip,
|
||||||
|
lb_vip=self.loadbalancer['vip_port_id'],
|
||||||
|
loadbalancer=self.loadbalancer)
|
||||||
|
|
||||||
def check_router_components_on_edge(self, router):
|
def check_router_components_on_edge(self, router):
|
||||||
edge_ips = CONF.nsx_edge.nsx_edge_ip
|
edge_ips = CONF.nsx_edge.nsx_edge_ip
|
||||||
|
|
|
@ -13,14 +13,21 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
|
import time
|
||||||
|
|
||||||
|
from oslo_utils import uuidutils
|
||||||
from tempest import config
|
from tempest import config
|
||||||
|
from tempest.lib.common.utils import data_utils
|
||||||
from tempest.lib.common.utils import test_utils
|
from tempest.lib.common.utils import test_utils
|
||||||
|
|
||||||
from tempest.lib import decorators
|
from tempest.lib import decorators
|
||||||
from tempest.lib import exceptions
|
from tempest.lib import exceptions
|
||||||
from tempest import test
|
from tempest import test
|
||||||
|
|
||||||
from vmware_nsx_tempest_plugin.common import constants
|
from vmware_nsx_tempest_plugin.common import constants
|
||||||
from vmware_nsx_tempest_plugin.lib import feature_manager
|
from vmware_nsx_tempest_plugin.lib import feature_manager
|
||||||
|
from vmware_nsx_tempest_plugin.services import nsxp_client
|
||||||
|
from vmware_nsx_tempest_plugin.services import nsxv3_client
|
||||||
|
|
||||||
|
|
||||||
LOG = constants.log.getLogger(__name__)
|
LOG = constants.log.getLogger(__name__)
|
||||||
|
@ -58,6 +65,12 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
|
||||||
@classmethod
|
@classmethod
|
||||||
def resource_setup(cls):
|
def resource_setup(cls):
|
||||||
super(OctaviaRoundRobin, cls).resource_setup()
|
super(OctaviaRoundRobin, cls).resource_setup()
|
||||||
|
cls.nsx = nsxv3_client.NSXV3Client(CONF.nsxv3.nsx_manager,
|
||||||
|
CONF.nsxv3.nsx_user,
|
||||||
|
CONF.nsxv3.nsx_password)
|
||||||
|
cls.nsxp = nsxp_client.NSXPClient(CONF.nsxv3.nsx_manager,
|
||||||
|
CONF.nsxv3.nsx_user,
|
||||||
|
CONF.nsxv3.nsx_password)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def setup_credentials(cls):
|
def setup_credentials(cls):
|
||||||
|
@ -79,7 +92,7 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
|
||||||
self.vip_fip = None
|
self.vip_fip = None
|
||||||
self.web_service_start_delay = 2.5
|
self.web_service_start_delay = 2.5
|
||||||
|
|
||||||
def tearDown(self):
|
def tearDown(self, clean=None):
|
||||||
if self.vip_fip:
|
if self.vip_fip:
|
||||||
LOG.debug("tearDown lbass vip fip")
|
LOG.debug("tearDown lbass vip fip")
|
||||||
self.disassociate_floatingip(self.vip_fip, and_delete=True)
|
self.disassociate_floatingip(self.vip_fip, and_delete=True)
|
||||||
|
@ -87,7 +100,10 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
|
||||||
LOG.debug("tearDown lbass")
|
LOG.debug("tearDown lbass")
|
||||||
lb_id = self.loadbalancer['id']
|
lb_id = self.loadbalancer['id']
|
||||||
self.delete_octavia_lb_resources(lb_id)
|
self.delete_octavia_lb_resources(lb_id)
|
||||||
|
cert_file = open(constants.CERT_FILE, "r")
|
||||||
|
cert_content = cert_file.read()
|
||||||
|
self.check_certificate_at_backend(should_present=False,
|
||||||
|
cert_conent=cert_content)
|
||||||
LOG.debug("tearDown lbaas exiting...")
|
LOG.debug("tearDown lbaas exiting...")
|
||||||
super(OctaviaRoundRobin, self).tearDown()
|
super(OctaviaRoundRobin, self).tearDown()
|
||||||
|
|
||||||
|
@ -207,6 +223,89 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
|
||||||
vip_net_id=net_id)
|
vip_net_id=net_id)
|
||||||
self.check_project_lbaas()
|
self.check_project_lbaas()
|
||||||
|
|
||||||
|
@decorators.attr(type='nsxv3')
|
||||||
|
@decorators.idempotent_id('d6bd0657-6867-4b7a-8704-3844b11b1a34')
|
||||||
|
def test_octavia_lb_vip_route_with_tenant_net(self):
|
||||||
|
"""
|
||||||
|
This testcase creates an octavia Loadbalancer with tenant net
|
||||||
|
and create floating ip for lb vip, and check advertised
|
||||||
|
route at tier1 backend
|
||||||
|
"""
|
||||||
|
diction = self.deploy_octavia_topology()
|
||||||
|
self.start_web_servers(constants.HTTP_PORT)
|
||||||
|
net_id = diction['network']['id']
|
||||||
|
lb_dict = self.create_project_octavia(protocol_type="HTTP",
|
||||||
|
protocol_port="80",
|
||||||
|
lb_algorithm="LEAST_CONNECTIONS",
|
||||||
|
vip_net_id=net_id)
|
||||||
|
self.check_project_lbaas()
|
||||||
|
vip_address = lb_dict['vip_address']
|
||||||
|
vip_fip = lb_dict['vip_fip']
|
||||||
|
router_state_1 = diction['router']['router']
|
||||||
|
nsx_router_nat_rules = self.nsx.get_logical_router_nat_rule_ips(
|
||||||
|
router_state_1['name'], router_state_1['id'])
|
||||||
|
route_present = False
|
||||||
|
for advertised_net in nsx_router_nat_rules['advertisedNetworks']:
|
||||||
|
if len(advertised_net['networks']) > 0:
|
||||||
|
if vip_address in advertised_net[
|
||||||
|
'networks'][0]['network'] and\
|
||||||
|
advertised_net['networks'][
|
||||||
|
0]['advertiseRouteType'] == 'T1_LB_VIP' and\
|
||||||
|
advertised_net['networks'][0]['advertiseAllow']:
|
||||||
|
route_present = True
|
||||||
|
self.assertEqual(True, route_present, 'Lb vip route is not advertised')
|
||||||
|
kwargs = dict(port_id=None)
|
||||||
|
self.cmgr_adm.floating_ips_client.\
|
||||||
|
update_floatingip(vip_fip['id'],
|
||||||
|
**kwargs)['floatingip']
|
||||||
|
time.sleep(30)
|
||||||
|
nsx_router_nat_rules = self.nsx.get_logical_router_nat_rule_ips(
|
||||||
|
router_state_1['name'], router_state_1['id'])
|
||||||
|
vip_address = vip_fip['fixed_ip_address']
|
||||||
|
route_present = False
|
||||||
|
for advertised_net in nsx_router_nat_rules['advertisedNetworks']:
|
||||||
|
if len(advertised_net['networks']) > 0:
|
||||||
|
if vip_address in advertised_net[
|
||||||
|
'networks'][0]['network'] and\
|
||||||
|
advertised_net['networks'][
|
||||||
|
0]['advertiseRouteType'] == 'T1_LB_VIP' and\
|
||||||
|
advertised_net['networks'][0]['advertiseAllow']:
|
||||||
|
route_present = True
|
||||||
|
self.assertEqual(False, route_present, 'Lb vip route is advertised')
|
||||||
|
|
||||||
|
@decorators.attr(type='nsxv3')
|
||||||
|
@decorators.idempotent_id('d6bd0657-7078-4b7a-8704-3844b11b1a34')
|
||||||
|
def test_octavia_lb_vip_route_with_external_net(self):
|
||||||
|
"""
|
||||||
|
This testcase creates an octavia Loadbalancer with external net
|
||||||
|
and check lb vip route should get advertised at tier1 backend
|
||||||
|
"""
|
||||||
|
diction = self.deploy_octavia_topology()
|
||||||
|
self.start_web_servers(constants.HTTP_PORT)
|
||||||
|
network = self.cmgr_adm.networks_client.show_network(
|
||||||
|
CONF.network.public_network_id)['network']
|
||||||
|
lb_dict = self.create_project_octavia(
|
||||||
|
protocol_type="HTTP",
|
||||||
|
protocol_port="80",
|
||||||
|
lb_algorithm="LEAST_CONNECTIONS",
|
||||||
|
external_subnet=network['subnets'][0],
|
||||||
|
external=True)
|
||||||
|
self.check_project_lbaas()
|
||||||
|
vip_address = lb_dict['vip_address']
|
||||||
|
router_state_1 = diction['router']['router']
|
||||||
|
nsx_router_nat_rules = self.nsx.get_logical_router_nat_rule_ips(
|
||||||
|
router_state_1['name'], router_state_1['id'])
|
||||||
|
route_present = False
|
||||||
|
for advertised_net in nsx_router_nat_rules['advertisedNetworks']:
|
||||||
|
if len(advertised_net['networks']) > 0:
|
||||||
|
if vip_address in advertised_net[
|
||||||
|
'networks'][0]['network'] and\
|
||||||
|
advertised_net['networks'][
|
||||||
|
0]['advertiseRouteType'] == 'T1_LB_VIP' and\
|
||||||
|
advertised_net['networks'][0]['advertiseAllow']:
|
||||||
|
route_present = True
|
||||||
|
self.assertEqual(True, route_present, 'Lb vip route is not advertised')
|
||||||
|
|
||||||
@decorators.attr(type='nsxv3')
|
@decorators.attr(type='nsxv3')
|
||||||
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a34')
|
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a34')
|
||||||
def test_create_verify_octavia_lb_with_vip_subnet_id_LC(self):
|
def test_create_verify_octavia_lb_with_vip_subnet_id_LC(self):
|
||||||
|
@ -505,7 +604,36 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
|
||||||
|
|
||||||
@decorators.attr(type='nsxv3')
|
@decorators.attr(type='nsxv3')
|
||||||
@decorators.idempotent_id('74f022d6-a6ef-4458-96a7-541deadacf99')
|
@decorators.idempotent_id('74f022d6-a6ef-4458-96a7-541deadacf99')
|
||||||
def test_octavia_http_http_traffic_with_barbican_secrets(self):
|
def test_octavia_http_traffic_with_barbican_secrets(self):
|
||||||
|
"""
|
||||||
|
Create octavia loadbalancer with http traffic with barbican enabled.
|
||||||
|
"""
|
||||||
|
diction = self.deploy_octavia_topology()
|
||||||
|
subnet_id = diction['subnet']['subnet']['id']
|
||||||
|
if not CONF.nsxv3.ens:
|
||||||
|
self.start_web_servers(constants.HTTP_PORT)
|
||||||
|
barbican_secrets = self.create_barbican_secret_conatainer(
|
||||||
|
constants.CERT_FILE, constants.KEY_FILE)
|
||||||
|
barbican_container = barbican_secrets['secret_container']
|
||||||
|
self.create_project_octavia(protocol_type="HTTP",
|
||||||
|
protocol_port="80",
|
||||||
|
lb_algorithm="ROUND_ROBIN",
|
||||||
|
vip_subnet_id=subnet_id,
|
||||||
|
hm_type='HTTP',
|
||||||
|
member_count=2,
|
||||||
|
weight=5,
|
||||||
|
pool_protocol='HTTP',
|
||||||
|
pool_port='80',
|
||||||
|
barbican_container=barbican_container,
|
||||||
|
count=0, barbican=True,
|
||||||
|
delay=self.hm_delay,
|
||||||
|
max_retries=self.hm_max_retries,
|
||||||
|
timeout=self.hm_timeout)
|
||||||
|
self.check_lbaas_project_weight_values(barbican_http=True)
|
||||||
|
|
||||||
|
@decorators.attr(type='nsxv3')
|
||||||
|
@decorators.idempotent_id('74f022d6-a6ef-4458-96a7-541deadacf99')
|
||||||
|
def test_octavia_https_traffic_with_barbican_secrets(self):
|
||||||
"""
|
"""
|
||||||
Create octavia loadbalancer with http traffic with barbican enabled.
|
Create octavia loadbalancer with http traffic with barbican enabled.
|
||||||
"""
|
"""
|
||||||
|
@ -520,13 +648,107 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
|
||||||
protocol_port="443",
|
protocol_port="443",
|
||||||
lb_algorithm="ROUND_ROBIN",
|
lb_algorithm="ROUND_ROBIN",
|
||||||
vip_subnet_id=subnet_id,
|
vip_subnet_id=subnet_id,
|
||||||
|
hm_type='HTTP',
|
||||||
|
member_count=2,
|
||||||
|
weight=5,
|
||||||
|
pool_protocol='HTTP',
|
||||||
|
pool_port='80',
|
||||||
barbican_container=barbican_container,
|
barbican_container=barbican_container,
|
||||||
barbican=True, pool_protocol='HTTP')
|
count=0, barbican=True,
|
||||||
self.check_project_lbaas()
|
delay=self.hm_delay,
|
||||||
|
max_retries=self.hm_max_retries,
|
||||||
|
timeout=self.hm_timeout)
|
||||||
|
self.check_lbaas_project_weight_values(HTTPS=True)
|
||||||
|
|
||||||
@decorators.attr(type='nsxv3')
|
@decorators.attr(type='nsxv3')
|
||||||
@decorators.idempotent_id('74f022d6-a6ef-4458-96a7-541deadacf99')
|
@decorators.idempotent_id('d6bd0657-7078-5c8b-8704-3844b11b1a34')
|
||||||
def test_octavia_https_http_traffic_with_barbican_secrets(self):
|
def test_octavia_multiple_listeners_with_secrets(self):
|
||||||
|
"""
|
||||||
|
Create multiple terminated https protocol
|
||||||
|
based listener with same loadbalancer
|
||||||
|
"""
|
||||||
|
diction = self.deploy_octavia_topology()
|
||||||
|
subnet_id = diction['subnet']['subnet']['id']
|
||||||
|
if not CONF.nsxv3.ens:
|
||||||
|
self.start_web_servers(constants.HTTP_PORT)
|
||||||
|
barbican_secrets = self.create_barbican_secret_conatainer(
|
||||||
|
constants.CERT_FILE, constants.KEY_FILE)
|
||||||
|
barbican_container = barbican_secrets['secret_container']
|
||||||
|
protocol_type = "TERMINATED_HTTPS"
|
||||||
|
protocol_port = 443
|
||||||
|
lb_name = data_utils.rand_name("tempest_lb")
|
||||||
|
self.loadbalancer = self.\
|
||||||
|
octavia_admin_client.\
|
||||||
|
create_octavia_load_balancer(name=lb_name,
|
||||||
|
vip_subnet_id=subnet_id
|
||||||
|
)['loadbalancer']
|
||||||
|
lb_id = self.loadbalancer['id']
|
||||||
|
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
|
||||||
|
for i in range(1, 20):
|
||||||
|
tls_id = barbican_container["container_ref"]
|
||||||
|
self.listener = self.octavia_admin_listener_client.\
|
||||||
|
create_octavia_listener(loadbalancer_id=lb_id,
|
||||||
|
protocol=protocol_type,
|
||||||
|
protocol_port=protocol_port,
|
||||||
|
name=lb_name,
|
||||||
|
allowed_cidrs=None,
|
||||||
|
default_tls_container_ref=tls_id
|
||||||
|
)['listener']
|
||||||
|
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
|
||||||
|
protocol_port = protocol_port + 1
|
||||||
|
|
||||||
|
@decorators.idempotent_id('d6bd0657-7078-5c8b-0815-3844b11b1a34')
|
||||||
|
def test_octavia_create_listener_with_empty_secrets(self):
|
||||||
|
"""
|
||||||
|
Try to create listener with terminated https
|
||||||
|
protocol and empty secret , it should fail.
|
||||||
|
"""
|
||||||
|
secret_name1 = data_utils.rand_name(name='tempest-cert-secret')
|
||||||
|
kwargs = {"secret_type": constants.SECRET_TYPE,
|
||||||
|
"algorithm": constants.ALGORITHM,
|
||||||
|
"name": secret_name1}
|
||||||
|
barbican_secret1 = self.create_barbican_secret(**kwargs)
|
||||||
|
secret_name2 = data_utils.rand_name(name='tempest-key-secret')
|
||||||
|
kwargs = {"secret_type": constants.SECRET_TYPE,
|
||||||
|
"algorithm": constants.ALGORITHM,
|
||||||
|
"name": secret_name2}
|
||||||
|
barbican_secret2 = self.create_barbican_secret(**kwargs)
|
||||||
|
container_name = data_utils.rand_name(name='tempest-container')
|
||||||
|
kwargs = {"type": constants.CONTAINER_TYPE,
|
||||||
|
"name": container_name,
|
||||||
|
"secret_refs": [{"secret_ref":
|
||||||
|
barbican_secret1['secret_ref'],
|
||||||
|
"name": 'certificate'},
|
||||||
|
{"secret_ref":
|
||||||
|
barbican_secret2['secret_ref'],
|
||||||
|
"name": 'private_key'}]}
|
||||||
|
barbican_container = self.create_barbican_container(**kwargs)
|
||||||
|
diction = self.deploy_octavia_topology()
|
||||||
|
subnet_id = diction['subnet']['subnet']['id']
|
||||||
|
protocol_type = "TERMINATED_HTTPS"
|
||||||
|
protocol_port = 443
|
||||||
|
lb_name = data_utils.rand_name("tempest_lb")
|
||||||
|
self.loadbalancer = self.\
|
||||||
|
octavia_admin_client.\
|
||||||
|
create_octavia_load_balancer(name=lb_name,
|
||||||
|
vip_subnet_id=subnet_id
|
||||||
|
)['loadbalancer']
|
||||||
|
lb_id = self.loadbalancer['id']
|
||||||
|
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
|
||||||
|
self.assertRaises(exceptions.BadRequest,
|
||||||
|
self.octavia_admin_listener_client.
|
||||||
|
create_octavia_listener,
|
||||||
|
loadbalancer_id=lb_id,
|
||||||
|
protocol=protocol_type,
|
||||||
|
protocol_port=protocol_port,
|
||||||
|
name=lb_name,
|
||||||
|
allowed_cidrs=None,
|
||||||
|
default_tls_container_ref=barbican_container[
|
||||||
|
'container_ref'])
|
||||||
|
|
||||||
|
@decorators.attr(type='nsxv3')
|
||||||
|
@decorators.idempotent_id('d6bd0657-7078-5c8b-0815-4055c21b1a34')
|
||||||
|
def test_octavia_check_certificate_at_backend(self):
|
||||||
"""
|
"""
|
||||||
Create octavia loadbalancer with http traffic with barbican enabled.
|
Create octavia loadbalancer with http traffic with barbican enabled.
|
||||||
"""
|
"""
|
||||||
|
@ -537,13 +759,60 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
|
||||||
barbican_secrets = self.create_barbican_secret_conatainer(
|
barbican_secrets = self.create_barbican_secret_conatainer(
|
||||||
constants.CERT_FILE, constants.KEY_FILE)
|
constants.CERT_FILE, constants.KEY_FILE)
|
||||||
barbican_container = barbican_secrets['secret_container']
|
barbican_container = barbican_secrets['secret_container']
|
||||||
self.create_project_octavia(protocol_type="TERMINATED_HTTPS",
|
self.create_project_octavia(
|
||||||
protocol_port="80",
|
protocol_type="TERMINATED_HTTPS",
|
||||||
lb_algorithm="ROUND_ROBIN",
|
protocol_port="443",
|
||||||
vip_subnet_id=subnet_id,
|
lb_algorithm="ROUND_ROBIN",
|
||||||
barbican_container=barbican_container,
|
vip_subnet_id=subnet_id,
|
||||||
barbican=True, pool_protocol='HTTPS')
|
hm_type='HTTP',
|
||||||
self.check_project_lbaas()
|
member_count=2,
|
||||||
|
weight=5,
|
||||||
|
pool_protocol='HTTP',
|
||||||
|
pool_port='80',
|
||||||
|
barbican_container=barbican_container,
|
||||||
|
count=0, barbican=True, delay=self.hm_delay,
|
||||||
|
max_retries=self.hm_max_retries, timeout=self.hm_timeout)
|
||||||
|
self.check_lbaas_project_weight_values(HTTPS=True)
|
||||||
|
cert_file = open(constants.CERT_FILE, "r")
|
||||||
|
cert_content = cert_file.read()
|
||||||
|
self.check_certificate_at_backend(cert_conent=cert_content.rstrip())
|
||||||
|
|
||||||
|
@decorators.attr(type='nsxv3')
|
||||||
|
@decorators.idempotent_id('d6bd0657-7078-5c8b-0815-4055c21c2b45')
|
||||||
|
def test_octavia_create_lbaas_listener_with_invalid_container_uuid(self):
|
||||||
|
"""
|
||||||
|
Create octavia loadbalancer with http traffic with barbican enabled.
|
||||||
|
"""
|
||||||
|
diction = self.deploy_octavia_topology()
|
||||||
|
subnet_id = diction['subnet']['subnet']['id']
|
||||||
|
if not CONF.nsxv3.ens:
|
||||||
|
self.start_web_servers(constants.HTTP_PORT)
|
||||||
|
barbican_secrets = self.create_barbican_secret_conatainer(
|
||||||
|
constants.CERT_FILE, constants.KEY_FILE)
|
||||||
|
container_ref = barbican_secrets["secret_container"]['container_ref']\
|
||||||
|
.split('/')
|
||||||
|
container_ref.remove(container_ref[len(container_ref) - 1])
|
||||||
|
container_ref.append(uuidutils.generate_uuid())
|
||||||
|
container_ref = '/'.join(str(e) for e in container_ref)
|
||||||
|
protocol_type = "TERMINATED_HTTPS"
|
||||||
|
protocol_port = 443
|
||||||
|
lb_name = data_utils.rand_name("tempest_lb")
|
||||||
|
self.loadbalancer = self.\
|
||||||
|
octavia_admin_client.\
|
||||||
|
create_octavia_load_balancer(name=lb_name,
|
||||||
|
vip_subnet_id=subnet_id
|
||||||
|
)['loadbalancer']
|
||||||
|
lb_id = self.loadbalancer['id']
|
||||||
|
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
|
||||||
|
self.assertRaises(exceptions.BadRequest,
|
||||||
|
self.octavia_admin_listener_client.
|
||||||
|
create_octavia_listener,
|
||||||
|
loadbalancer_id=lb_id,
|
||||||
|
protocol=protocol_type,
|
||||||
|
protocol_port=protocol_port,
|
||||||
|
name=lb_name,
|
||||||
|
allowed_cidrs=None,
|
||||||
|
default_tls_container_ref=container_ref)
|
||||||
|
|
||||||
@decorators.attr(type='nsxv3')
|
@decorators.attr(type='nsxv3')
|
||||||
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a34')
|
@decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a34')
|
||||||
|
|
|
@ -16,6 +16,7 @@ import time
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
from tempest import config
|
from tempest import config
|
||||||
from tempest.lib.common.utils import data_utils
|
from tempest.lib.common.utils import data_utils
|
||||||
|
from tempest.lib.common.utils import test_utils
|
||||||
|
|
||||||
from tempest.lib import decorators
|
from tempest.lib import decorators
|
||||||
|
|
||||||
|
@ -246,6 +247,132 @@ class TestTier1DRComponentDeployment(feature_manager.FeatureManager):
|
||||||
self.assertEqual(True, result[0]['dr_present'])
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
self.assertEqual(False, result[1]['sr_present'])
|
self.assertEqual(False, result[1]['sr_present'])
|
||||||
|
|
||||||
|
@decorators.idempotent_id('1206238b-02dd-1098-c228-10066ecc69e5')
|
||||||
|
def test_tier1_sr_should_create_when_service_is_enabled_octavia(self):
|
||||||
|
"""
|
||||||
|
Check sr and dr component of router should be present
|
||||||
|
on edge when any service is enable
|
||||||
|
"""
|
||||||
|
kwargs = {"enable_snat": False}
|
||||||
|
router_state_1 = self.create_topology_router(set_gateway=True,
|
||||||
|
routers_client=self.
|
||||||
|
cmgr_adm.routers_client,
|
||||||
|
**kwargs)
|
||||||
|
network_lbaas_1 = self.create_topology_network(
|
||||||
|
"network_lbaas", networks_client=self.cmgr_adm.networks_client)
|
||||||
|
sec_rule_client = self.cmgr_adm.security_group_rules_client
|
||||||
|
sec_client = self.cmgr_adm.security_groups_client
|
||||||
|
kwargs = dict(tenant_id=network_lbaas_1['tenant_id'],
|
||||||
|
security_group_rules_client=sec_rule_client,
|
||||||
|
security_groups_client=sec_client)
|
||||||
|
self.sg = self.create_topology_security_group(**kwargs)
|
||||||
|
lbaas_rules = [dict(direction='ingress', protocol='tcp',
|
||||||
|
port_range_min=constants.HTTP_PORT,
|
||||||
|
port_range_max=constants.HTTP_PORT, ),
|
||||||
|
dict(direction='ingress', protocol='tcp',
|
||||||
|
port_range_min=443, port_range_max=443, )]
|
||||||
|
for rule in lbaas_rules:
|
||||||
|
self.add_security_group_rule(
|
||||||
|
self.sg,
|
||||||
|
rule,
|
||||||
|
ruleclient=sec_rule_client,
|
||||||
|
secclient=sec_client,
|
||||||
|
tenant_id=network_lbaas_1['tenant_id'])
|
||||||
|
self.create_topology_subnet(
|
||||||
|
"subnet_lbaas", network_lbaas_1,
|
||||||
|
subnets_client=self.cmgr_adm.subnets_client,
|
||||||
|
routers_client=self.cmgr_adm.routers_client,
|
||||||
|
router_id=router_state_1["id"])
|
||||||
|
no_of_servers = 2
|
||||||
|
image_id = self.get_glance_image_id(["cirros", "esx"])
|
||||||
|
for instance in range(0, no_of_servers):
|
||||||
|
self.create_topology_instance(
|
||||||
|
"server_lbaas_%s" % instance, [network_lbaas_1],
|
||||||
|
security_groups=[{'name': self.sg['name']}],
|
||||||
|
image_id=image_id, create_floating_ip=False,
|
||||||
|
clients=self.cmgr_adm)
|
||||||
|
result = self.check_router_components_on_edge(router_state_1)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(False, result[1]['sr_present'])
|
||||||
|
for i in range(0, 2):
|
||||||
|
lb_dict = self.create_project_octavia(
|
||||||
|
protocol_type="HTTP", protocol_port="80",
|
||||||
|
lb_algorithm="LEAST_CONNECTIONS",
|
||||||
|
vip_net_id=network_lbaas_1['id'],
|
||||||
|
fip_disassociate=False, create_fip=False)
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
result = self.check_router_components_on_edge(router_state_1)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(True, result[1]['sr_present'])
|
||||||
|
self.delete_octavia_lb_resources(lb_dict['lb_id'])
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
result = self.check_router_components_on_edge(router_state_1)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(False, result[1]['sr_present'])
|
||||||
|
|
||||||
|
@decorators.idempotent_id('2317349c-13ee-2198-c228-10066ecc69e5')
|
||||||
|
def test_tier1_sr_component_stail_entry_should_not_present_octavia(self):
|
||||||
|
"""
|
||||||
|
Check sr component stail entry should not present on edge after
|
||||||
|
multiple times enabling and disabling service on router.
|
||||||
|
"""
|
||||||
|
kwargs = {"enable_snat": False}
|
||||||
|
router_state_1 = self.create_topology_router(set_gateway=True,
|
||||||
|
routers_client=self.
|
||||||
|
cmgr_adm.routers_client,
|
||||||
|
**kwargs)
|
||||||
|
network_lbaas_1 = self.create_topology_network(
|
||||||
|
"network_lbaas", networks_client=self.cmgr_adm.networks_client)
|
||||||
|
sec_rule_client = self.cmgr_adm.security_group_rules_client
|
||||||
|
sec_client = self.cmgr_adm.security_groups_client
|
||||||
|
kwargs = dict(tenant_id=network_lbaas_1['tenant_id'],
|
||||||
|
security_group_rules_client=sec_rule_client,
|
||||||
|
security_groups_client=sec_client)
|
||||||
|
self.sg = self.create_topology_security_group(**kwargs)
|
||||||
|
lbaas_rules = [dict(direction='ingress', protocol='tcp',
|
||||||
|
port_range_min=constants.HTTP_PORT,
|
||||||
|
port_range_max=constants.HTTP_PORT, ),
|
||||||
|
dict(direction='ingress', protocol='tcp',
|
||||||
|
port_range_min=443, port_range_max=443, )]
|
||||||
|
for rule in lbaas_rules:
|
||||||
|
self.add_security_group_rule(
|
||||||
|
self.sg,
|
||||||
|
rule,
|
||||||
|
ruleclient=sec_rule_client,
|
||||||
|
secclient=sec_client,
|
||||||
|
tenant_id=network_lbaas_1['tenant_id'])
|
||||||
|
self.create_topology_subnet(
|
||||||
|
"subnet_lbaas", network_lbaas_1,
|
||||||
|
subnets_client=self.cmgr_adm.subnets_client,
|
||||||
|
routers_client=self.cmgr_adm.routers_client,
|
||||||
|
router_id=router_state_1["id"])
|
||||||
|
no_of_servers = 2
|
||||||
|
image_id = self.get_glance_image_id(["cirros", "esx"])
|
||||||
|
for instance in range(0, no_of_servers):
|
||||||
|
self.create_topology_instance(
|
||||||
|
"server_lbaas_%s" % instance, [network_lbaas_1],
|
||||||
|
security_groups=[{'name': self.sg['name']}],
|
||||||
|
image_id=image_id, create_floating_ip=False,
|
||||||
|
clients=self.cmgr_adm)
|
||||||
|
result = self.check_router_components_on_edge(router_state_1)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(False, result[1]['sr_present'])
|
||||||
|
for i in range(0, 20):
|
||||||
|
lb_dict = self.create_project_octavia(
|
||||||
|
protocol_type="HTTP", protocol_port="80",
|
||||||
|
lb_algorithm="LEAST_CONNECTIONS",
|
||||||
|
vip_net_id=network_lbaas_1['id'],
|
||||||
|
fip_disassociate=False, create_fip=False)
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
result = self.check_router_components_on_edge(router_state_1)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(True, result[1]['sr_present'])
|
||||||
|
self.delete_octavia_lb_resources(lb_dict['lb_id'])
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
result = self.check_router_components_on_edge(router_state_1)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(False, result[1]['sr_present'])
|
||||||
|
|
||||||
@decorators.idempotent_id('2317349c-13ee-2198-c228-10066ecc69e5')
|
@decorators.idempotent_id('2317349c-13ee-2198-c228-10066ecc69e5')
|
||||||
def test_tier1_sr_component_stail_entry_should_not_present(self):
|
def test_tier1_sr_component_stail_entry_should_not_present(self):
|
||||||
"""
|
"""
|
||||||
|
@ -371,6 +498,139 @@ class TestTier1DRComponentDeployment(feature_manager.FeatureManager):
|
||||||
self.assertEqual(True, result[0]['dr_present'])
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
self.assertEqual(False, result[1]['sr_present'])
|
self.assertEqual(False, result[1]['sr_present'])
|
||||||
|
|
||||||
|
@decorators.idempotent_id('2317349c-13ee-3209-d339-10066ecc69e5')
|
||||||
|
def test_sr_dr_componet_of_router_with_multiple_services_octavia(self):
|
||||||
|
"""
|
||||||
|
Check sr-dr component of router status
|
||||||
|
"""
|
||||||
|
kwargs = {"enable_snat": False}
|
||||||
|
network_name = data_utils.rand_name(name='tempest-net')
|
||||||
|
subnet_name = data_utils.rand_name(name='tempest-subnet')
|
||||||
|
router_state = self.create_topology_router(
|
||||||
|
set_gateway=True,
|
||||||
|
routers_client=self.cmgr_adm.routers_client, **kwargs)
|
||||||
|
network_state = self.create_topology_network(
|
||||||
|
network_name, networks_client=self.cmgr_adm.networks_client)
|
||||||
|
subnet_state = self.create_topology_subnet(
|
||||||
|
subnet_name, network_state,
|
||||||
|
subnets_client=self.cmgr_adm.subnets_client)
|
||||||
|
interface = self.cmgr_adm.routers_client.add_router_interface(
|
||||||
|
router_state['id'], subnet_id=subnet_state["id"])
|
||||||
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||||
|
self.cmgr_adm.routers_client.remove_router_interface,
|
||||||
|
router_state['id'], subnet_id=subnet_state["id"])
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
result = self.check_router_components_on_edge(router_state)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(False, result[1]['sr_present'])
|
||||||
|
ext_network = self.cmgr_adm.networks_client.show_network(
|
||||||
|
CONF.network.public_network_id)['network']
|
||||||
|
ext_subnet = self.cmgr_adm.subnets_client.show_subnet(
|
||||||
|
ext_network['subnets'][0])['subnet']
|
||||||
|
fw_rules = self.create_firewall_rule(
|
||||||
|
name='test_rule', protocol='icmp',
|
||||||
|
action="allow", destination_ip_address=ext_subnet["cidr"])
|
||||||
|
rules = []
|
||||||
|
# Check firewall rule
|
||||||
|
rules.append(fw_rules['firewall_rule']['id'])
|
||||||
|
policy_name = data_utils.rand_name('fw-policy-')
|
||||||
|
# Create firewall policy
|
||||||
|
fw_policy = self.create_firewall_policy(
|
||||||
|
name=policy_name, firewall_rules=rules,
|
||||||
|
project_id=router_state['project_id'])
|
||||||
|
show_policy = self.show_firewall_policy(
|
||||||
|
fw_policy['firewall_policy']['id'])
|
||||||
|
# Check firewall policy
|
||||||
|
self.assertEqual(
|
||||||
|
show_policy.get('firewall_policy')['name'],
|
||||||
|
policy_name)
|
||||||
|
self.assertEqual(show_policy.get('firewall_policy')
|
||||||
|
['firewall_rules'], rules)
|
||||||
|
policy_id = fw_policy['firewall_policy']['id']
|
||||||
|
group_name = data_utils.rand_name('fw-group-')
|
||||||
|
# Create firewall group
|
||||||
|
fw_group = self.create_firewall_group(
|
||||||
|
name=group_name,
|
||||||
|
ingress_firewall_policy_id=policy_id,
|
||||||
|
egress_firewall_policy_id=policy_id,
|
||||||
|
ports=[interface['port_id']],
|
||||||
|
project_id=router_state['project_id'])
|
||||||
|
self._wait_firewall_ready(fw_group["firewall_group"]["id"])
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
result = self.check_router_components_on_edge(router_state)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(True, result[1]['sr_present'])
|
||||||
|
public_network_info = {"external_gateway_info": dict(
|
||||||
|
network_id=CONF.network.public_network_id,
|
||||||
|
enable_snat=True)}
|
||||||
|
self.cmgr_adm.routers_client.update_router(router_state['id'],
|
||||||
|
**public_network_info)
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
result = self.check_router_components_on_edge(router_state)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(True, result[1]['sr_present'])
|
||||||
|
sec_rule_client = self.cmgr_adm.security_group_rules_client
|
||||||
|
sec_client = self.cmgr_adm.security_groups_client
|
||||||
|
kwargs = dict(tenant_id=network_state['tenant_id'],
|
||||||
|
security_group_rules_client=sec_rule_client,
|
||||||
|
security_groups_client=sec_client)
|
||||||
|
self.sg = self.create_topology_security_group(**kwargs)
|
||||||
|
lbaas_rules = [dict(direction='ingress', protocol='tcp',
|
||||||
|
port_range_min=constants.HTTP_PORT,
|
||||||
|
port_range_max=constants.HTTP_PORT, ),
|
||||||
|
dict(direction='ingress', protocol='tcp',
|
||||||
|
port_range_min=443, port_range_max=443, )]
|
||||||
|
for rule in lbaas_rules:
|
||||||
|
self.add_security_group_rule(
|
||||||
|
self.sg,
|
||||||
|
rule,
|
||||||
|
ruleclient=sec_rule_client,
|
||||||
|
secclient=sec_client,
|
||||||
|
tenant_id=network_state['tenant_id'])
|
||||||
|
no_of_servers = 2
|
||||||
|
image_id = self.get_glance_image_id(["cirros", "esx"])
|
||||||
|
for instance in range(0, no_of_servers):
|
||||||
|
self.create_topology_instance(
|
||||||
|
"server_lbaas_%s" % instance, [network_state],
|
||||||
|
security_groups=[{'name': self.sg['name']}],
|
||||||
|
image_id=image_id, create_floating_ip=False,
|
||||||
|
clients=self.cmgr_adm)
|
||||||
|
lb_dict = self.create_project_octavia(
|
||||||
|
protocol_type="HTTP", protocol_port="80",
|
||||||
|
lb_algorithm="LEAST_CONNECTIONS",
|
||||||
|
vip_net_id=network_state['id'],
|
||||||
|
fip_disassociate=False, create_fip=False)
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
result = self.check_router_components_on_edge(router_state)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(True, result[1]['sr_present'])
|
||||||
|
self.delete_octavia_lb_resources(lb_dict['lb_id'])
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
result = self.check_router_components_on_edge(router_state)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(True, result[1]['sr_present'])
|
||||||
|
public_network_info = {"external_gateway_info": dict(
|
||||||
|
network_id=CONF.network.public_network_id,
|
||||||
|
enable_snat=False)}
|
||||||
|
self.cmgr_adm.routers_client.update_router(router_state['id'],
|
||||||
|
**public_network_info)
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
result = self.check_router_components_on_edge(router_state)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(True, result[1]['sr_present'])
|
||||||
|
self.fwaas_v2_client.update_firewall_v2_group(
|
||||||
|
fw_group["firewall_group"]["id"], ports=[])
|
||||||
|
self.fwaas_v2_client.delete_firewall_v2_group(
|
||||||
|
fw_group["firewall_group"]["id"])
|
||||||
|
self.fwaas_v2_client.delete_firewall_v2_policy(
|
||||||
|
fw_policy['firewall_policy']['id'])
|
||||||
|
self.fwaas_v2_client.delete_firewall_v2_rule(
|
||||||
|
fw_rules['firewall_rule']['id'])
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
result = self.check_router_components_on_edge(router_state)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(False, result[1]['sr_present'])
|
||||||
|
|
||||||
@decorators.idempotent_id('2317349c-24ff-4310-e440-21176ecc69e5')
|
@decorators.idempotent_id('2317349c-24ff-4310-e440-21176ecc69e5')
|
||||||
def test_sr_component_status_after_deleting_multiple_services_once(self):
|
def test_sr_component_status_after_deleting_multiple_services_once(self):
|
||||||
"""
|
"""
|
||||||
|
@ -442,3 +702,120 @@ class TestTier1DRComponentDeployment(feature_manager.FeatureManager):
|
||||||
result = self.check_router_components_on_edge(router_state)
|
result = self.check_router_components_on_edge(router_state)
|
||||||
self.assertEqual(True, result[0]['dr_present'])
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
self.assertEqual(False, result[1]['sr_present'])
|
self.assertEqual(False, result[1]['sr_present'])
|
||||||
|
|
||||||
|
@decorators.idempotent_id('2317349c-13ee-3209-d339-10066ecc69e5')
|
||||||
|
def test_sr_component_status_del_mul_services_once_octavia(self):
|
||||||
|
"""
|
||||||
|
Check sr-dr component of router status
|
||||||
|
"""
|
||||||
|
kwargs = {"enable_snat": False}
|
||||||
|
network_name = data_utils.rand_name(name='tempest-net')
|
||||||
|
subnet_name = data_utils.rand_name(name='tempest-subnet')
|
||||||
|
router_state = self.create_topology_router(
|
||||||
|
set_gateway=True,
|
||||||
|
routers_client=self.cmgr_adm.routers_client, **kwargs)
|
||||||
|
network_state = self.create_topology_network(
|
||||||
|
network_name, networks_client=self.cmgr_adm.networks_client)
|
||||||
|
subnet_state = self.create_topology_subnet(
|
||||||
|
subnet_name, network_state,
|
||||||
|
subnets_client=self.cmgr_adm.subnets_client)
|
||||||
|
interface = self.cmgr_adm.routers_client.add_router_interface(
|
||||||
|
router_state['id'], subnet_id=subnet_state["id"])
|
||||||
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||||
|
self.cmgr_adm.routers_client.remove_router_interface,
|
||||||
|
router_state['id'], subnet_id=subnet_state["id"])
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
result = self.check_router_components_on_edge(router_state)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(False, result[1]['sr_present'])
|
||||||
|
ext_network = self.cmgr_adm.networks_client.show_network(
|
||||||
|
CONF.network.public_network_id)['network']
|
||||||
|
ext_subnet = self.cmgr_adm.subnets_client.show_subnet(
|
||||||
|
ext_network['subnets'][0])['subnet']
|
||||||
|
fw_rules = self.create_firewall_rule(
|
||||||
|
name='test_rule', protocol='icmp',
|
||||||
|
action="allow", destination_ip_address=ext_subnet["cidr"])
|
||||||
|
rules = []
|
||||||
|
# Check firewall rule
|
||||||
|
rules.append(fw_rules['firewall_rule']['id'])
|
||||||
|
policy_name = data_utils.rand_name('fw-policy-')
|
||||||
|
# Create firewall policy
|
||||||
|
fw_policy = self.create_firewall_policy(
|
||||||
|
name=policy_name, firewall_rules=rules,
|
||||||
|
project_id=router_state['project_id'])
|
||||||
|
show_policy = self.show_firewall_policy(
|
||||||
|
fw_policy['firewall_policy']['id'])
|
||||||
|
# Check firewall policy
|
||||||
|
self.assertEqual(
|
||||||
|
show_policy.get('firewall_policy')['name'],
|
||||||
|
policy_name)
|
||||||
|
self.assertEqual(show_policy.get('firewall_policy')
|
||||||
|
['firewall_rules'], rules)
|
||||||
|
policy_id = fw_policy['firewall_policy']['id']
|
||||||
|
group_name = data_utils.rand_name('fw-group-')
|
||||||
|
# Create firewall group
|
||||||
|
fw_group = self.create_firewall_group(
|
||||||
|
name=group_name,
|
||||||
|
ingress_firewall_policy_id=policy_id,
|
||||||
|
egress_firewall_policy_id=policy_id,
|
||||||
|
ports=[interface['port_id']],
|
||||||
|
project_id=router_state['project_id'])
|
||||||
|
self._wait_firewall_ready(fw_group["firewall_group"]["id"])
|
||||||
|
public_network_info = {"external_gateway_info": dict(
|
||||||
|
network_id=CONF.network.public_network_id,
|
||||||
|
enable_snat=True)}
|
||||||
|
self.cmgr_adm.routers_client.update_router(router_state['id'],
|
||||||
|
**public_network_info)
|
||||||
|
sec_rule_client = self.cmgr_adm.security_group_rules_client
|
||||||
|
sec_client = self.cmgr_adm.security_groups_client
|
||||||
|
kwargs = dict(tenant_id=network_state['tenant_id'],
|
||||||
|
security_group_rules_client=sec_rule_client,
|
||||||
|
security_groups_client=sec_client)
|
||||||
|
self.sg = self.create_topology_security_group(**kwargs)
|
||||||
|
lbaas_rules = [dict(direction='ingress', protocol='tcp',
|
||||||
|
port_range_min=constants.HTTP_PORT,
|
||||||
|
port_range_max=constants.HTTP_PORT, ),
|
||||||
|
dict(direction='ingress', protocol='tcp',
|
||||||
|
port_range_min=443, port_range_max=443, )]
|
||||||
|
for rule in lbaas_rules:
|
||||||
|
self.add_security_group_rule(
|
||||||
|
self.sg,
|
||||||
|
rule,
|
||||||
|
ruleclient=sec_rule_client,
|
||||||
|
secclient=sec_client,
|
||||||
|
tenant_id=network_state['tenant_id'])
|
||||||
|
no_of_servers = 2
|
||||||
|
image_id = self.get_glance_image_id(["cirros", "esx"])
|
||||||
|
for instance in range(0, no_of_servers):
|
||||||
|
self.create_topology_instance(
|
||||||
|
"server_lbaas_%s" % instance, [network_state],
|
||||||
|
security_groups=[{'name': self.sg['name']}],
|
||||||
|
image_id=image_id, create_floating_ip=False,
|
||||||
|
clients=self.cmgr_adm)
|
||||||
|
lb_dict = self.create_project_octavia(
|
||||||
|
protocol_type="HTTP", protocol_port="80",
|
||||||
|
lb_algorithm="LEAST_CONNECTIONS",
|
||||||
|
vip_net_id=network_state['id'],
|
||||||
|
fip_disassociate=False, create_fip=False)
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
result = self.check_router_components_on_edge(router_state)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(True, result[1]['sr_present'])
|
||||||
|
self.delete_octavia_lb_resources(lb_dict['lb_id'])
|
||||||
|
public_network_info = {"external_gateway_info": dict(
|
||||||
|
network_id=CONF.network.public_network_id,
|
||||||
|
enable_snat=False)}
|
||||||
|
self.cmgr_adm.routers_client.update_router(router_state['id'],
|
||||||
|
**public_network_info)
|
||||||
|
self.fwaas_v2_client.update_firewall_v2_group(
|
||||||
|
fw_group["firewall_group"]["id"], ports=[])
|
||||||
|
self.fwaas_v2_client.delete_firewall_v2_group(
|
||||||
|
fw_group["firewall_group"]["id"])
|
||||||
|
self.fwaas_v2_client.delete_firewall_v2_policy(
|
||||||
|
fw_policy['firewall_policy']['id'])
|
||||||
|
self.fwaas_v2_client.delete_firewall_v2_rule(
|
||||||
|
fw_rules['firewall_rule']['id'])
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
result = self.check_router_components_on_edge(router_state)
|
||||||
|
self.assertEqual(True, result[0]['dr_present'])
|
||||||
|
self.assertEqual(False, result[1]['sr_present'])
|
||||||
|
|
|
@ -48,6 +48,20 @@ class TestVerifyFwNatOrder(feature_manager.FeatureManager):
|
||||||
cls.sec_rule_client = cls.cmgr_adm.security_group_rules_client
|
cls.sec_rule_client = cls.cmgr_adm.security_group_rules_client
|
||||||
cls.sec_client = cls.cmgr_adm.security_groups_client
|
cls.sec_client = cls.cmgr_adm.security_groups_client
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
super(TestVerifyFwNatOrder, self).setUp()
|
||||||
|
CONF.validation.ssh_shell_prologue = ''
|
||||||
|
self.vip_ip_address = ''
|
||||||
|
self.namestart = 'lbaas-ops'
|
||||||
|
self.poke_counters = 12
|
||||||
|
self.hm_delay = 4
|
||||||
|
self.hm_max_retries = 3
|
||||||
|
self.hm_timeout = 10
|
||||||
|
self.server_names = []
|
||||||
|
self.loadbalancer = None
|
||||||
|
self.vip_fip = None
|
||||||
|
self.web_service_start_delay = 2.5
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def resource_setup(cls):
|
def resource_setup(cls):
|
||||||
super(TestVerifyFwNatOrder, cls).resource_setup()
|
super(TestVerifyFwNatOrder, cls).resource_setup()
|
||||||
|
@ -306,3 +320,576 @@ class TestVerifyFwNatOrder(feature_manager.FeatureManager):
|
||||||
self.assertNotIn("64 bytes from ", str(out))
|
self.assertNotIn("64 bytes from ", str(out))
|
||||||
self.fwaas_v2_client.update_firewall_v2_group(
|
self.fwaas_v2_client.update_firewall_v2_group(
|
||||||
fw_group["firewall_group"]["id"], ports=[])
|
fw_group["firewall_group"]["id"], ports=[])
|
||||||
|
|
||||||
|
@decorators.idempotent_id('2317449c-14ca-1428-a428-09956daa46c3')
|
||||||
|
def test_verfiy_nat_fw_order_external_fw_with_octavia_lb(self):
|
||||||
|
"""
|
||||||
|
Create NAT and Firewall rules on router.
|
||||||
|
Verify order of NAT and Firewall.
|
||||||
|
"""
|
||||||
|
kwargs = {"enable_snat": True}
|
||||||
|
network_name = data_utils.rand_name(name='tempest-net')
|
||||||
|
subnet_name = data_utils.rand_name(name='tempest-subnet')
|
||||||
|
router_state = self.create_topology_router(
|
||||||
|
set_gateway=True,
|
||||||
|
routers_client=self.cmgr_adm.routers_client, **kwargs)
|
||||||
|
network_state = self.create_topology_network(
|
||||||
|
network_name, networks_client=self.cmgr_adm.networks_client)
|
||||||
|
subnet_state = self.create_topology_subnet(
|
||||||
|
subnet_name, network_state,
|
||||||
|
subnets_client=self.cmgr_adm.subnets_client)
|
||||||
|
interface = self.cmgr_adm.routers_client.add_router_interface(
|
||||||
|
router_state['id'], subnet_id=subnet_state["id"])
|
||||||
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||||
|
self.cmgr_adm.routers_client.remove_router_interface,
|
||||||
|
router_state['id'], subnet_id=subnet_state["id"])
|
||||||
|
sec_rule_client = self.cmgr_adm.security_group_rules_client
|
||||||
|
sec_client = self.cmgr_adm.security_groups_client
|
||||||
|
kwargs = dict(tenant_id=network_state['tenant_id'],
|
||||||
|
security_group_rules_client=sec_rule_client,
|
||||||
|
security_groups_client=sec_client)
|
||||||
|
self.sg = self._create_security_group(
|
||||||
|
security_group_rules_client=self.cmgr_adm.
|
||||||
|
security_group_rules_client,
|
||||||
|
security_groups_client=self.cmgr_adm.security_groups_client)
|
||||||
|
lbaas_rules = [dict(direction='ingress', protocol='tcp',
|
||||||
|
port_range_min=constants.HTTP_PORT,
|
||||||
|
port_range_max=constants.HTTP_PORT, ),
|
||||||
|
dict(direction='ingress', protocol='tcp',
|
||||||
|
port_range_min=443, port_range_max=443, )]
|
||||||
|
for rule in lbaas_rules:
|
||||||
|
self.add_security_group_rule(
|
||||||
|
self.sg,
|
||||||
|
rule,
|
||||||
|
ruleclient=self.cmgr_adm.security_group_rules_client,
|
||||||
|
secclient=self.cmgr_adm.security_groups_client,
|
||||||
|
tenant_id=network_state['tenant_id'])
|
||||||
|
ext_network = self.cmgr_adm.networks_client.show_network(
|
||||||
|
CONF.network.public_network_id)['network']
|
||||||
|
ext_subnet = self.cmgr_adm.subnets_client.show_subnet(
|
||||||
|
ext_network['subnets'][0])['subnet']
|
||||||
|
security_groups = [{'name': self.sg['name']}]
|
||||||
|
image_id = self.get_glance_image_id(["cirros", "esx"])
|
||||||
|
self.create_topology_instance(
|
||||||
|
"state_vm_1", [network_state],
|
||||||
|
create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm,
|
||||||
|
security_groups=security_groups)
|
||||||
|
self.create_topology_instance(
|
||||||
|
"state_vm_2", [network_state],
|
||||||
|
create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm,
|
||||||
|
security_groups=security_groups)
|
||||||
|
self.start_web_servers(constants.HTTP_PORT)
|
||||||
|
if CONF.network.backend == 'nsxp':
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
nsx_router = self.nsxp.get_logical_router(router_state['name'],
|
||||||
|
router_state['id'])
|
||||||
|
nat_rules = self.nsxp.get_logical_router_nat_rules(nsx_router)
|
||||||
|
for nat_rule in nat_rules:
|
||||||
|
if nat_rule['firewall_match'] == 'BYPASS':
|
||||||
|
continue
|
||||||
|
self.assertEqual('MATCH_EXTERNAL_ADDRESS',
|
||||||
|
nat_rule['firewall_match'])
|
||||||
|
nsx_router = self.nsx.get_logical_router(router_state['name'],
|
||||||
|
router_state['id'])
|
||||||
|
nat_rules = self.nsx.get_logical_router_nat_rules(nsx_router)
|
||||||
|
for nat_rule in nat_rules:
|
||||||
|
if nat_rule['firewall_match'] == 'BYPASS':
|
||||||
|
continue
|
||||||
|
self.assertEqual('MATCH_EXTERNAL_ADDRESS',
|
||||||
|
nat_rule['firewall_match'])
|
||||||
|
lb_cist = self.create_project_octavia(
|
||||||
|
protocol_type="HTTP", protocol_port="80",
|
||||||
|
lb_algorithm="LEAST_CONNECTIONS",
|
||||||
|
vip_net_id=network_state['id'])
|
||||||
|
if CONF.network.backend == 'nsxp':
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
nsx_router = self.nsxp.get_logical_router(router_state['name'],
|
||||||
|
router_state['id'])
|
||||||
|
nat_rules = self.nsxp.get_logical_router_nat_rules(nsx_router)
|
||||||
|
for nat_rule in nat_rules:
|
||||||
|
if nat_rule['firewall_match'] == 'BYPASS':
|
||||||
|
continue
|
||||||
|
self.assertEqual('MATCH_EXTERNAL_ADDRESS',
|
||||||
|
nat_rule['firewall_match'])
|
||||||
|
nsx_router = self.nsx.get_logical_router(router_state['name'],
|
||||||
|
router_state['id'])
|
||||||
|
nat_rules = self.nsx.get_logical_router_nat_rules(nsx_router)
|
||||||
|
for nat_rule in nat_rules:
|
||||||
|
if nat_rule['firewall_match'] == 'BYPASS':
|
||||||
|
continue
|
||||||
|
self.assertEqual('MATCH_EXTERNAL_ADDRESS',
|
||||||
|
nat_rule['firewall_match'])
|
||||||
|
fw_rules = self.create_firewall_rule(
|
||||||
|
name='test_rule', protocol='tcp',
|
||||||
|
action="allow", destination_ip_address=ext_subnet["cidr"],
|
||||||
|
destination_port='80')
|
||||||
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||||
|
self.fwaas_v2_client.delete_firewall_v2_rule,
|
||||||
|
fw_rules['firewall_rule']['id'])
|
||||||
|
rules = []
|
||||||
|
# Check firewall rule
|
||||||
|
rules.append(fw_rules['firewall_rule']['id'])
|
||||||
|
policy_name = data_utils.rand_name('fw-policy-')
|
||||||
|
# Create firewall policy
|
||||||
|
fw_policy = self.create_firewall_policy(
|
||||||
|
name=policy_name, firewall_rules=rules,
|
||||||
|
project_id=router_state['project_id'])
|
||||||
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||||
|
self.fwaas_v2_client.delete_firewall_v2_policy,
|
||||||
|
fw_policy['firewall_policy']['id'])
|
||||||
|
show_policy = self.show_firewall_policy(
|
||||||
|
fw_policy['firewall_policy']['id'])
|
||||||
|
# Check firewall policy
|
||||||
|
self.assertEqual(
|
||||||
|
show_policy.get('firewall_policy')['name'],
|
||||||
|
policy_name)
|
||||||
|
self.assertEqual(show_policy.get('firewall_policy')
|
||||||
|
['firewall_rules'], rules)
|
||||||
|
policy_id = fw_policy['firewall_policy']['id']
|
||||||
|
group_name = data_utils.rand_name('fw-group-')
|
||||||
|
# Create firewall group
|
||||||
|
fw_group = self.create_firewall_group(
|
||||||
|
name=group_name,
|
||||||
|
ingress_firewall_policy_id=policy_id,
|
||||||
|
egress_firewall_policy_id=policy_id,
|
||||||
|
ports=[interface['port_id']],
|
||||||
|
project_id=router_state['project_id'])
|
||||||
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||||
|
self.fwaas_v2_client.update_firewall_v2_group,
|
||||||
|
fw_group["firewall_group"]["id"], ports=[])
|
||||||
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||||
|
self.fwaas_v2_client.delete_firewall_v2_group,
|
||||||
|
fw_group["firewall_group"]["id"])
|
||||||
|
self._wait_firewall_ready(fw_group["firewall_group"]["id"])
|
||||||
|
self.check_project_lbaas()
|
||||||
|
fw_rules = self.update_firewall_rule(
|
||||||
|
fw_rules['firewall_rule']['id'],
|
||||||
|
destination_ip_address=subnet_state['cidr'])
|
||||||
|
if CONF.network.backend == 'nsxp':
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
self.check_project_lbaas()
|
||||||
|
self.delete_octavia_lb_resources(lb_cist['lb_id'])
|
||||||
|
self.fwaas_v2_client.update_firewall_v2_group(
|
||||||
|
fw_group["firewall_group"]["id"], ports=[])
|
||||||
|
|
||||||
|
@decorators.idempotent_id('2317320c-44bd-9106-c317-09956daa46c3')
|
||||||
|
def test_verfiy_nat_fw_order_internal_fw_with_octa(self):
|
||||||
|
"""
|
||||||
|
Create NAT and Firewall rules on router.
|
||||||
|
Verify order of NAT and Firewall.
|
||||||
|
"""
|
||||||
|
kwargs = {"enable_snat": True}
|
||||||
|
network_name = data_utils.rand_name(name='tempest-net')
|
||||||
|
subnet_name = data_utils.rand_name(name='tempest-subnet')
|
||||||
|
router_state = self.create_topology_router(
|
||||||
|
set_gateway=True,
|
||||||
|
routers_client=self.cmgr_adm.routers_client, **kwargs)
|
||||||
|
network_state = self.create_topology_network(
|
||||||
|
network_name, networks_client=self.cmgr_adm.networks_client)
|
||||||
|
subnet_state = self.create_topology_subnet(
|
||||||
|
subnet_name, network_state,
|
||||||
|
subnets_client=self.cmgr_adm.subnets_client)
|
||||||
|
interface = self.cmgr_adm.routers_client.add_router_interface(
|
||||||
|
router_state['id'], subnet_id=subnet_state["id"])
|
||||||
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||||
|
self.cmgr_adm.routers_client.remove_router_interface,
|
||||||
|
router_state['id'], subnet_id=subnet_state["id"])
|
||||||
|
sec_rule_client = self.cmgr_adm.security_group_rules_client
|
||||||
|
sec_client = self.cmgr_adm.security_groups_client
|
||||||
|
kwargs = dict(tenant_id=network_state['tenant_id'],
|
||||||
|
security_group_rules_client=sec_rule_client,
|
||||||
|
security_groups_client=sec_client)
|
||||||
|
self.sg = self._create_security_group(
|
||||||
|
security_group_rules_client=self.cmgr_adm.
|
||||||
|
security_group_rules_client,
|
||||||
|
security_groups_client=self.cmgr_adm.security_groups_client)
|
||||||
|
lbaas_rules = [dict(direction='ingress', protocol='tcp',
|
||||||
|
port_range_min=constants.HTTP_PORT,
|
||||||
|
port_range_max=constants.HTTP_PORT, ),
|
||||||
|
dict(direction='ingress', protocol='tcp',
|
||||||
|
port_range_min=443, port_range_max=443, )]
|
||||||
|
for rule in lbaas_rules:
|
||||||
|
self.add_security_group_rule(
|
||||||
|
self.sg,
|
||||||
|
rule,
|
||||||
|
ruleclient=self.cmgr_adm.security_group_rules_client,
|
||||||
|
secclient=self.cmgr_adm.security_groups_client,
|
||||||
|
tenant_id=network_state['tenant_id'])
|
||||||
|
ext_network = self.cmgr_adm.networks_client.show_network(
|
||||||
|
CONF.network.public_network_id)['network']
|
||||||
|
ext_subnet = self.cmgr_adm.subnets_client.show_subnet(
|
||||||
|
ext_network['subnets'][0])['subnet']
|
||||||
|
security_groups = [{'name': self.sg['name']}]
|
||||||
|
image_id = self.get_glance_image_id(["cirros", "esx"])
|
||||||
|
self.create_topology_instance(
|
||||||
|
"state_vm_1", [network_state],
|
||||||
|
create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm,
|
||||||
|
security_groups=security_groups)
|
||||||
|
self.create_topology_instance(
|
||||||
|
"state_vm_2", [network_state],
|
||||||
|
create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm,
|
||||||
|
security_groups=security_groups)
|
||||||
|
self.start_web_servers(constants.HTTP_PORT)
|
||||||
|
if CONF.network.backend == 'nsxp':
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
nsx_router = self.nsxp.get_logical_router(router_state['name'],
|
||||||
|
router_state['id'])
|
||||||
|
nat_rules = self.nsxp.get_logical_router_nat_rules(nsx_router)
|
||||||
|
for nat_rule in nat_rules:
|
||||||
|
if nat_rule['firewall_match'] == 'BYPASS':
|
||||||
|
continue
|
||||||
|
self.assertEqual('MATCH_INTERNAL_ADDRESS',
|
||||||
|
nat_rule['firewall_match'])
|
||||||
|
nsx_router = self.nsx.get_logical_router(router_state['name'],
|
||||||
|
router_state['id'])
|
||||||
|
nat_rules = self.nsx.get_logical_router_nat_rules(nsx_router)
|
||||||
|
for nat_rule in nat_rules:
|
||||||
|
if nat_rule['firewall_match'] == 'BYPASS':
|
||||||
|
continue
|
||||||
|
self.assertEqual('MATCH_INTERNAL_ADDRESS',
|
||||||
|
nat_rule['firewall_match'])
|
||||||
|
lb_cist = self.create_project_octavia(
|
||||||
|
protocol_type="HTTP", protocol_port="80",
|
||||||
|
lb_algorithm="LEAST_CONNECTIONS",
|
||||||
|
vip_net_id=network_state['id'])
|
||||||
|
if CONF.network.backend == 'nsxp':
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
nsx_router = self.nsxp.get_logical_router(router_state['name'],
|
||||||
|
router_state['id'])
|
||||||
|
nat_rules = self.nsxp.get_logical_router_nat_rules(nsx_router)
|
||||||
|
for nat_rule in nat_rules:
|
||||||
|
if nat_rule['firewall_match'] == 'BYPASS':
|
||||||
|
continue
|
||||||
|
self.assertEqual('MATCH_INTERNAL_ADDRESS',
|
||||||
|
nat_rule['firewall_match'])
|
||||||
|
nsx_router = self.nsx.get_logical_router(router_state['name'],
|
||||||
|
router_state['id'])
|
||||||
|
nat_rules = self.nsx.get_logical_router_nat_rules(nsx_router)
|
||||||
|
for nat_rule in nat_rules:
|
||||||
|
if nat_rule['firewall_match'] == 'BYPASS':
|
||||||
|
continue
|
||||||
|
self.assertEqual('MATCH_INTERNAL_ADDRESS',
|
||||||
|
nat_rule['firewall_match'])
|
||||||
|
fw_rules = self.create_firewall_rule(
|
||||||
|
name='test_rule', protocol='tcp',
|
||||||
|
action="allow", destination_ip_address=subnet_state['cidr'],
|
||||||
|
destination_port='80')
|
||||||
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||||
|
self.fwaas_v2_client.delete_firewall_v2_rule,
|
||||||
|
fw_rules['firewall_rule']['id'])
|
||||||
|
rules = []
|
||||||
|
# Check firewall rule
|
||||||
|
rules.append(fw_rules['firewall_rule']['id'])
|
||||||
|
policy_name = data_utils.rand_name('fw-policy-')
|
||||||
|
# Create firewall policy
|
||||||
|
fw_policy = self.create_firewall_policy(
|
||||||
|
name=policy_name, firewall_rules=rules,
|
||||||
|
project_id=router_state['project_id'])
|
||||||
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||||
|
self.fwaas_v2_client.delete_firewall_v2_policy,
|
||||||
|
fw_policy['firewall_policy']['id'])
|
||||||
|
show_policy = self.show_firewall_policy(
|
||||||
|
fw_policy['firewall_policy']['id'])
|
||||||
|
# Check firewall policy
|
||||||
|
self.assertEqual(
|
||||||
|
show_policy.get('firewall_policy')['name'],
|
||||||
|
policy_name)
|
||||||
|
self.assertEqual(show_policy.get('firewall_policy')
|
||||||
|
['firewall_rules'], rules)
|
||||||
|
policy_id = fw_policy['firewall_policy']['id']
|
||||||
|
group_name = data_utils.rand_name('fw-group-')
|
||||||
|
# Create firewall group
|
||||||
|
fw_group = self.create_firewall_group(
|
||||||
|
name=group_name,
|
||||||
|
ingress_firewall_policy_id=policy_id,
|
||||||
|
egress_firewall_policy_id=policy_id,
|
||||||
|
ports=[interface['port_id']],
|
||||||
|
project_id=router_state['project_id'])
|
||||||
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||||
|
self.fwaas_v2_client.update_firewall_v2_group,
|
||||||
|
fw_group["firewall_group"]["id"], ports=[])
|
||||||
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||||
|
self.fwaas_v2_client.delete_firewall_v2_group,
|
||||||
|
fw_group["firewall_group"]["id"])
|
||||||
|
self._wait_firewall_ready(fw_group["firewall_group"]["id"])
|
||||||
|
self.check_project_lbaas()
|
||||||
|
fw_rules = self.update_firewall_rule(
|
||||||
|
fw_rules['firewall_rule']['id'],
|
||||||
|
destination_ip_address=ext_subnet['cidr'])
|
||||||
|
if CONF.network.backend == 'nsxp':
|
||||||
|
time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
|
||||||
|
self.check_project_lbaas()
|
||||||
|
self.delete_octavia_lb_resources(lb_cist['lb_id'])
|
||||||
|
self.fwaas_v2_client.update_firewall_v2_group(
|
||||||
|
fw_group["firewall_group"]["id"], ports=[])
|
||||||
|
|
||||||
|
@decorators.idempotent_id('2317449c-14ca-1428-a428-10047daa46c3')
def test_verify_nat_fw_order_when_vm_booted_with_port_internal_fw(self):
    """Verify NAT and firewall ordering with an internal-address match.

    Builds a router + network + subnet topology, boots two VMs with
    floating IPs, and checks on the NSX backend that every non-BYPASS
    NAT rule on the router uses firewall_match == MATCH_INTERNAL_ADDRESS.
    Then attaches a FWaaS v2 group whose ICMP allow rule targets the
    internal subnet CIDR and verifies external ping to both VMs works;
    after re-pointing the rule at the external CIDR, ping must fail.
    """
    rtr_name = data_utils.rand_name(name='tempest-router')
    network_name = data_utils.rand_name(name='tempest-net')
    subnet_name = data_utils.rand_name(name='tempest-subnet')
    router_state = self.create_topology_router(
        rtr_name, set_gateway=True,
        routers_client=self.cmgr_adm.routers_client)
    network_state = self.create_topology_network(
        network_name, networks_client=self.cmgr_adm.networks_client)
    subnet_state = self.create_topology_subnet(
        subnet_name, network_state,
        subnets_client=self.cmgr_adm.subnets_client)
    interface = self.cmgr_adm.routers_client.add_router_interface(
        router_state['id'], subnet_id=subnet_state["id"])
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.cmgr_adm.routers_client.remove_router_interface,
                    router_state['id'], subnet_id=subnet_state["id"])
    security_group = self._create_security_group(
        security_group_rules_client=self.cmgr_adm.
        security_group_rules_client,
        security_groups_client=self.cmgr_adm.security_groups_client)
    image_id = self.get_glance_image_id(["cirros", "esx"])
    security_groups = [{'name': security_group['name']}]
    # Pre-create the VM ports so the security group can be applied
    # before the instances boot.
    port1 = self.create_topology_port(
        network_state, ports_client=self.cmgr_adm.ports_client)['port']
    port2 = self.create_topology_port(
        network_state, ports_client=self.cmgr_adm.ports_client)['port']
    kwargs = {'security_groups': [security_group['id']]}
    port1 = self.cmgr_adm.ports_client.update_port(
        port1['id'], **kwargs)['port']
    port2 = self.cmgr_adm.ports_client.update_port(
        port2['id'], **kwargs)['port']
    server1 = self.create_topology_instance(
        "state_vm_1",
        create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm,
        security_groups=security_groups, port=port1)
    server2 = self.create_topology_instance(
        "state_vm_2",
        create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm,
        security_groups=security_groups, port=port2)

    def _assert_nat_rules_match(nat_rules):
        # BYPASS rules (e.g. NO_DNAT) are exempt from FW ordering;
        # every other NAT rule must match on the internal address.
        for nat_rule in nat_rules:
            if nat_rule['firewall_match'] == 'BYPASS':
                continue
            self.assertEqual('MATCH_INTERNAL_ADDRESS',
                             nat_rule['firewall_match'])

    # NOTE(review): mangled source lost indentation; reconstructed as
    # policy (nsxp) vs MP (nsx) backend alternatives — confirm upstream.
    if CONF.network.backend == 'nsxp':
        # Give the policy backend time to realize the config.
        time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
        nsx_router = self.nsxp.get_logical_router(router_state['name'],
                                                  router_state['id'])
        _assert_nat_rules_match(
            self.nsxp.get_logical_router_nat_rules(nsx_router))
    else:
        nsx_router = self.nsx.get_logical_router(router_state['name'],
                                                 router_state['id'])
        _assert_nat_rules_match(
            self.nsx.get_logical_router_nat_rules(nsx_router))
    ext_network = self.cmgr_adm.networks_client.show_network(
        CONF.network.public_network_id)['network']
    ext_subnet = self.cmgr_adm.subnets_client.show_subnet(
        ext_network['subnets'][0])['subnet']
    # Allow ICMP destined to the internal subnet.
    fw_rules = self.create_firewall_rule(
        name='test_rule', protocol='icmp',
        action="allow",
        destination_ip_address=subnet_state['cidr'])
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.fwaas_v2_client.delete_firewall_v2_rule,
                    fw_rules['firewall_rule']['id'])
    rules = [fw_rules['firewall_rule']['id']]
    policy_name = data_utils.rand_name('fw-policy-')
    # Create firewall policy holding the single ICMP rule.
    fw_policy = self.create_firewall_policy(
        name=policy_name, firewall_rules=rules,
        project_id=router_state['project_id'])
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.fwaas_v2_client.delete_firewall_v2_policy,
                    fw_policy['firewall_policy']['id'])
    show_policy = self.show_firewall_policy(
        fw_policy['firewall_policy']['id'])
    # Sanity-check the policy round-trips with the expected contents.
    self.assertEqual(
        show_policy.get('firewall_policy')['name'],
        policy_name)
    self.assertEqual(show_policy.get('firewall_policy')
                     ['firewall_rules'], rules)
    policy_id = fw_policy['firewall_policy']['id']
    group_name = data_utils.rand_name('fw-group-')
    # Bind the policy to the router interface port via a firewall group.
    fw_group = self.create_firewall_group(
        name=group_name,
        ingress_firewall_policy_id=policy_id,
        egress_firewall_policy_id=policy_id,
        ports=[interface['port_id']],
        project_id=router_state['project_id'])
    # Detach ports before delete: a group with ports cannot be removed.
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.fwaas_v2_client.update_firewall_v2_group,
                    fw_group["firewall_group"]["id"], ports=[])
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.fwaas_v2_client.delete_firewall_v2_group,
                    fw_group["firewall_group"]["id"])
    self._wait_firewall_ready(fw_group["firewall_group"]["id"])
    # Verify traffic to both VMs is allowed while the rule matches the
    # internal CIDR.
    time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
    fip = server1["floating_ips"][0]["floating_ip_address"]
    out = self._test_ping_from_external_network(fip)
    self.assertIn("64 bytes from ", str(out))
    fip = server2["floating_ips"][0]["floating_ip_address"]
    out = self._test_ping_from_external_network(fip)
    self.assertIn("64 bytes from ", str(out))
    # Re-point the rule at the external CIDR: with internal-address
    # matching, ICMP to the VMs must now be dropped.
    fw_rules = self.update_firewall_rule(
        fw_rules['firewall_rule']['id'],
        destination_ip_address=ext_subnet['cidr'])
    if CONF.network.backend == 'nsxp':
        time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
    fip = server1["floating_ips"][0]["floating_ip_address"]
    out = self._test_ping_from_external_network(fip)
    self.assertNotIn("64 bytes from ", str(out))
    fip = server2["floating_ips"][0]["floating_ip_address"]
    out = self._test_ping_from_external_network(fip)
    self.assertNotIn("64 bytes from ", str(out))
    # Detach ports now so the cleanup delete succeeds immediately.
    self.fwaas_v2_client.update_firewall_v2_group(
        fw_group["firewall_group"]["id"], ports=[])
@decorators.idempotent_id('2317449c-14ca-1428-b530-10047daa46c3')
def test_verify_nat_fw_order_when_vm_booted_with_port_external_fw(self):
    """Verify NAT and firewall ordering with an external-address match.

    Builds a router + network + subnet topology, boots two VMs with
    floating IPs, and checks on the NSX backend that every non-BYPASS
    NAT rule on the router uses firewall_match == MATCH_EXTERNAL_ADDRESS.
    Then attaches a FWaaS v2 group whose ICMP allow rule targets the
    external subnet CIDR and verifies external ping to both VMs works;
    after re-pointing the rule at the internal CIDR, ping must fail.
    """
    rtr_name = data_utils.rand_name(name='tempest-router')
    network_name = data_utils.rand_name(name='tempest-net')
    subnet_name = data_utils.rand_name(name='tempest-subnet')
    router_state = self.create_topology_router(
        rtr_name, set_gateway=True,
        routers_client=self.cmgr_adm.routers_client)
    network_state = self.create_topology_network(
        network_name, networks_client=self.cmgr_adm.networks_client)
    subnet_state = self.create_topology_subnet(
        subnet_name, network_state,
        subnets_client=self.cmgr_adm.subnets_client)
    interface = self.cmgr_adm.routers_client.add_router_interface(
        router_state['id'], subnet_id=subnet_state["id"])
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.cmgr_adm.routers_client.remove_router_interface,
                    router_state['id'], subnet_id=subnet_state["id"])
    security_group = self._create_security_group(
        security_group_rules_client=self.cmgr_adm.
        security_group_rules_client,
        security_groups_client=self.cmgr_adm.security_groups_client)
    image_id = self.get_glance_image_id(["cirros", "esx"])
    security_groups = [{'name': security_group['name']}]
    # Pre-create the VM ports so the security group can be applied
    # before the instances boot.
    port1 = self.create_topology_port(
        network_state, ports_client=self.cmgr_adm.ports_client)['port']
    port2 = self.create_topology_port(
        network_state, ports_client=self.cmgr_adm.ports_client)['port']
    kwargs = {'security_groups': [security_group['id']]}
    port1 = self.cmgr_adm.ports_client.update_port(
        port1['id'], **kwargs)['port']
    port2 = self.cmgr_adm.ports_client.update_port(
        port2['id'], **kwargs)['port']
    server1 = self.create_topology_instance(
        "state_vm_1",
        create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm,
        security_groups=security_groups, port=port1)
    server2 = self.create_topology_instance(
        "state_vm_2",
        create_floating_ip=True, image_id=image_id, clients=self.cmgr_adm,
        security_groups=security_groups, port=port2)

    def _assert_nat_rules_match(nat_rules):
        # BYPASS rules (e.g. NO_DNAT) are exempt from FW ordering;
        # every other NAT rule must match on the external address.
        for nat_rule in nat_rules:
            if nat_rule['firewall_match'] == 'BYPASS':
                continue
            self.assertEqual('MATCH_EXTERNAL_ADDRESS',
                             nat_rule['firewall_match'])

    # NOTE(review): mangled source lost indentation; reconstructed as
    # policy (nsxp) vs MP (nsx) backend alternatives — confirm upstream.
    if CONF.network.backend == 'nsxp':
        # Give the policy backend time to realize the config.
        time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
        nsx_router = self.nsxp.get_logical_router(router_state['name'],
                                                  router_state['id'])
        _assert_nat_rules_match(
            self.nsxp.get_logical_router_nat_rules(nsx_router))
    else:
        nsx_router = self.nsx.get_logical_router(router_state['name'],
                                                 router_state['id'])
        _assert_nat_rules_match(
            self.nsx.get_logical_router_nat_rules(nsx_router))
    ext_network = self.cmgr_adm.networks_client.show_network(
        CONF.network.public_network_id)['network']
    ext_subnet = self.cmgr_adm.subnets_client.show_subnet(
        ext_network['subnets'][0])['subnet']
    # Allow ICMP destined to the external subnet.
    fw_rules = self.create_firewall_rule(
        name='test_rule', protocol='icmp',
        action="allow", destination_ip_address=ext_subnet["cidr"])
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.fwaas_v2_client.delete_firewall_v2_rule,
                    fw_rules['firewall_rule']['id'])
    rules = [fw_rules['firewall_rule']['id']]
    policy_name = data_utils.rand_name('fw-policy-')
    # Create firewall policy holding the single ICMP rule.
    fw_policy = self.create_firewall_policy(
        name=policy_name, firewall_rules=rules,
        project_id=router_state['project_id'])
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.fwaas_v2_client.delete_firewall_v2_policy,
                    fw_policy['firewall_policy']['id'])
    show_policy = self.show_firewall_policy(
        fw_policy['firewall_policy']['id'])
    # Sanity-check the policy round-trips with the expected contents.
    self.assertEqual(
        show_policy.get('firewall_policy')['name'],
        policy_name)
    self.assertEqual(show_policy.get('firewall_policy')
                     ['firewall_rules'], rules)
    policy_id = fw_policy['firewall_policy']['id']
    group_name = data_utils.rand_name('fw-group-')
    # Bind the policy to the router interface port via a firewall group.
    fw_group = self.create_firewall_group(
        name=group_name,
        ingress_firewall_policy_id=policy_id,
        egress_firewall_policy_id=policy_id,
        ports=[interface['port_id']],
        project_id=router_state['project_id'])
    # Detach ports before delete: a group with ports cannot be removed.
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.fwaas_v2_client.update_firewall_v2_group,
                    fw_group["firewall_group"]["id"], ports=[])
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.fwaas_v2_client.delete_firewall_v2_group,
                    fw_group["firewall_group"]["id"])
    self._wait_firewall_ready(fw_group["firewall_group"]["id"])
    # Verify traffic to both VMs is allowed while the rule matches the
    # external CIDR.
    time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
    fip = server1["floating_ips"][0]["floating_ip_address"]
    out = self._test_ping_from_external_network(fip)
    self.assertIn("64 bytes from ", str(out))
    fip = server2["floating_ips"][0]["floating_ip_address"]
    out = self._test_ping_from_external_network(fip)
    self.assertIn("64 bytes from ", str(out))
    # Re-point the rule at the internal CIDR: with external-address
    # matching, ICMP to the VMs must now be dropped.
    fw_rules = self.update_firewall_rule(
        fw_rules['firewall_rule']['id'],
        destination_ip_address=subnet_state['cidr'])
    if CONF.network.backend == 'nsxp':
        time.sleep(constants.NSXP_BACKEND_SMALL_TIME_INTERVAL)
    fip = server1["floating_ips"][0]["floating_ip_address"]
    out = self._test_ping_from_external_network(fip)
    self.assertNotIn("64 bytes from ", str(out))
    fip = server2["floating_ips"][0]["floating_ip_address"]
    out = self._test_ping_from_external_network(fip)
    self.assertNotIn("64 bytes from ", str(out))
    # Detach ports now so the cleanup delete succeeds immediately.
    self.fwaas_v2_client.update_firewall_v2_group(
        fw_group["firewall_group"]["id"], ports=[])
Loading…
Reference in New Issue