From 04ba7685abb61f18832713becd3b9de431776f05 Mon Sep 17 00:00:00 2001 From: Adam Harwell Date: Mon, 10 Jun 2019 15:43:22 -0700 Subject: [PATCH] WIP: Test Additional VIPs Depends-On: https://review.opendev.org/660239 Story: 2005608 Task: 30843 Task: 30846 Change-Id: Ie9827108457884fc8012d5cdc4b512d705bc231a --- octavia_tempest_plugin/common/constants.py | 2 + octavia_tempest_plugin/config.py | 6 + .../load_balancer/v2/loadbalancer_client.py | 4 +- .../tests/api/v2/test_load_balancer.py | 37 ++- .../tests/multivip_scenario/__init__.py | 0 .../tests/multivip_scenario/v2/__init__.py | 0 .../v2/test_additional_vips.py | 246 ++++++++++++++++++ octavia_tempest_plugin/tests/test_base.py | 107 +++++++- zuul.d/jobs.yaml | 12 + zuul.d/projects.yaml | 2 + 10 files changed, 400 insertions(+), 16 deletions(-) create mode 100644 octavia_tempest_plugin/tests/multivip_scenario/__init__.py create mode 100644 octavia_tempest_plugin/tests/multivip_scenario/v2/__init__.py create mode 100644 octavia_tempest_plugin/tests/multivip_scenario/v2/test_additional_vips.py diff --git a/octavia_tempest_plugin/common/constants.py b/octavia_tempest_plugin/common/constants.py index aea476e8..7402f964 100644 --- a/octavia_tempest_plugin/common/constants.py +++ b/octavia_tempest_plugin/common/constants.py @@ -68,6 +68,8 @@ SNI_CONTAINER_REFS = 'sni_container_refs' DEFAULT_POOL_ID = 'default_pool_id' L7_POLICIES = 'l7_policies' ALPN_PROTOCOLS = 'alpn_protocols' +ADDITIONAL_VIPS = 'additional_vips' +IP_ADDRESS = 'ip_address' LB_ALGORITHM = 'lb_algorithm' LB_ALGORITHM_ROUND_ROBIN = 'ROUND_ROBIN' diff --git a/octavia_tempest_plugin/config.py b/octavia_tempest_plugin/config.py index 4d1543b2..81c459a1 100644 --- a/octavia_tempest_plugin/config.py +++ b/octavia_tempest_plugin/config.py @@ -176,6 +176,12 @@ OctaviaGroup = [ cfg.StrOpt('vip_ipv6_subnet_cidr', default='fdde:1a92:7523:70a0::/64', help='CIDR format subnet to use for the IPv6 vip subnet.'), + cfg.StrOpt('vip_2_subnet_cidr', + default='10.1.2.0/24', + help='CIDR format subnet to use for the 2nd vip subnet.'), + cfg.StrOpt('vip_2_ipv6_subnet_cidr', + default='fdde:1a92:7523:70a1::/64', + help='CIDR format subnet to use for the 2nd IPv6 vip subnet.'), cfg.StrOpt('member_1_ipv4_subnet_cidr', default='10.2.1.0/24', help='CIDR format subnet to use for the member 1 subnet.'), diff --git a/octavia_tempest_plugin/services/load_balancer/v2/loadbalancer_client.py b/octavia_tempest_plugin/services/load_balancer/v2/loadbalancer_client.py index 9499d893..b386963b 100644 --- a/octavia_tempest_plugin/services/load_balancer/v2/loadbalancer_client.py +++ b/octavia_tempest_plugin/services/load_balancer/v2/loadbalancer_client.py @@ -42,7 +42,7 @@ class LoadbalancerClient(base_client.BaseLBaaSClient): vip_address=Unset, vip_network_id=Unset, vip_port_id=Unset, vip_qos_policy_id=Unset, vip_subnet_id=Unset, return_object_only=True, - tags=Unset): + tags=Unset, additional_vips=Unset): """Create a loadbalancer. :param name: Human-readable name of the resource. @@ -60,6 +60,8 @@ class LoadbalancerClient(base_client.BaseLBaaSClient): the Virtual IP (VIP). :param vip_subnet_id: The ID of the subnet for the Virtual IP (VIP). :param tags: A human-readable tags of the resource. + :param additional_vips: List of Virtual IP (VIP) dictionaries to add as + additional VIPs on the LB. :param return_object_only: If True, the response returns the object inside the root tag. False returns the full response from the API. 
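A minimal usage sketch of the new parameter, as the tests below exercise it. This is illustrative only: the subnet IDs and the fixed address are placeholders, and per the Octavia v2 API each entry takes a subnet_id plus an optional ip_address.

    # Illustrative caller sketch; IDs and addresses are placeholders.
    additional_vips = [
        {'subnet_id': 'SECOND_SUBNET_ID'},  # address allocated by Neutron
        {'subnet_id': 'THIRD_SUBNET_ID', 'ip_address': '10.1.2.10'},
    ]
    lb = mem_lb_client.create_loadbalancer(
        name='lb1',
        provider='amphora',
        vip_subnet_id='PRIMARY_SUBNET_ID',
        additional_vips=additional_vips)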
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py index 7ade6428..4c411828 100644 --- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py +++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py @@ -48,7 +48,35 @@ class LoadBalancerAPITest(test_base.LoadBalancerBaseTest): def test_load_balancer_ipv6_create(self): self._test_load_balancer_create(6) - def _test_load_balancer_create(self, ip_version): + @decorators.idempotent_id('4e3c5ad2-8f4e-4444-befc-ed2e91605bf7') + @testtools.skipUnless(CONF.load_balancer.test_with_ipv6, + 'IPv6 testing is disabled') + def test_loadbalancer_ipv4_create_additional_vips(self): + if not self.lb_admin_flavor_profile_client.is_version_supported( + self.api_version, '2.25'): + raise self.skipException( + 'Additional VIPs feature requires ' + 'Octavia API version 2.25 or newer.') + additional_vips = [ + {const.SUBNET_ID: self.lb_member_vip_2_subnet[const.ID]}, + {const.SUBNET_ID: self.lb_member_vip_2_ipv6_subnet[const.ID]}] + self._test_load_balancer_create(4, additional_vips=additional_vips) + + @decorators.idempotent_id('ca547b53-810c-400b-97e9-c954d6ef6301') + @testtools.skipUnless(CONF.load_balancer.test_with_ipv6, + 'IPv6 testing is disabled') + def test_loadbalancer_ipv6_create_additional_vips(self): + if not self.lb_admin_flavor_profile_client.is_version_supported( + self.api_version, '2.25'): + raise self.skipException( + 'Additional VIPs feature requires ' + 'Octavia API version 2.25 or newer.') + additional_vips = [ + {const.SUBNET_ID: self.lb_member_vip_2_subnet[const.ID]}, + {const.SUBNET_ID: self.lb_member_vip_2_ipv6_subnet[const.ID]}] + self._test_load_balancer_create(6, additional_vips=additional_vips) + + def _test_load_balancer_create(self, ip_version, additional_vips=None): """Tests load balancer create and basic show APIs. * Tests that users without the load balancer member role cannot @@ -78,6 +106,8 @@ class LoadBalancerAPITest(test_base.LoadBalancerBaseTest): }) self._setup_lb_network_kwargs(lb_kwargs, ip_version, use_fixed_ip=True) + if additional_vips: + lb_kwargs[const.ADDITIONAL_VIPS] = additional_vips # Test that a user without the loadbalancer role cannot # create a load balancer. 
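For context on the assertions added in the next hunk: the create/show response is expected to carry an additional_vips list mirroring the request, with the allocated addresses filled in (this is the same shape _get_vips reads in the new scenario module). An illustrative fragment, with invented IDs and addresses:

    # Illustrative response fragment; values are invented for the example.
    lb = {
        'vip_subnet_id': 'PRIMARY_SUBNET_ID',
        'vip_address': '10.1.1.5',
        'additional_vips': [
            {'subnet_id': 'SECOND_SUBNET_ID', 'ip_address': '10.1.2.7'},
            {'subnet_id': 'THIRD_SUBNET_ID',
             'ip_address': 'fdde:1a92:7523:70a1::9'},
        ],
    }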
@@ -145,6 +175,11 @@
                          lb[const.VIP_ADDRESS])
         self.assertEqual(lb_kwargs[const.VIP_SUBNET_ID],
                          lb[const.VIP_SUBNET_ID])
+        if additional_vips:
+            add_vip_subnets_on_lb = [av[const.SUBNET_ID]
+                                     for av in lb[const.ADDITIONAL_VIPS]]
+            for add_vip in additional_vips:
+                self.assertIn(add_vip[const.SUBNET_ID], add_vip_subnets_on_lb)
 
         # Attempt to clean up so that one full test run doesn't start 10+
         # amps before the cleanup phase fires
diff --git a/octavia_tempest_plugin/tests/multivip_scenario/__init__.py b/octavia_tempest_plugin/tests/multivip_scenario/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/octavia_tempest_plugin/tests/multivip_scenario/v2/__init__.py b/octavia_tempest_plugin/tests/multivip_scenario/v2/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/octavia_tempest_plugin/tests/multivip_scenario/v2/test_additional_vips.py b/octavia_tempest_plugin/tests/multivip_scenario/v2/test_additional_vips.py
new file mode 100644
index 00000000..eb801c46
--- /dev/null
+++ b/octavia_tempest_plugin/tests/multivip_scenario/v2/test_additional_vips.py
@@ -0,0 +1,246 @@
+# Copyright 2018 GoDaddy
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import ipaddress
+import testtools
+
+from oslo_log import log as logging
+import six
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin.tests import test_base
+from octavia_tempest_plugin.tests import waiters
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+@testtools.skipUnless(
+    CONF.validation.run_validation,
+    'Traffic tests will not work without run_validation enabled.')
+class AdditionalVIPScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
+
+    @classmethod
+    def resource_setup(cls):
+        """Setup resources needed by the tests."""
+        super(AdditionalVIPScenarioTest, cls).resource_setup()
+
+    def _make_lb(self, vip_data):
+        if not self.lb_admin_flavor_profile_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException(
+                'Additional VIPs feature requires '
+                'Octavia API version 2.25 or newer.')
+
+        lb_name = data_utils.rand_name("lb_member_lb1_operations")
+        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+                     const.NAME: lb_name}
+        lb_kwargs.update(vip_data)
+
+        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
+        self.lb_id = lb[const.ID]
+        self.addCleanup(
+            self.mem_lb_client.cleanup_loadbalancer,
+            self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.lb_build_interval,
+                                CONF.load_balancer.lb_build_timeout)
+
+        protocol = const.HTTP
+        lb_feature_enabled = CONF.loadbalancer_feature_enabled
+        if not lb_feature_enabled.l7_protocol_enabled:
+            protocol = lb_feature_enabled.l4_protocol
+
+        listener_name = 
data_utils.rand_name("lb_member_listener1_operations") + listener_kwargs = { + const.NAME: listener_name, + const.PROTOCOL: protocol, + const.PROTOCOL_PORT: '80', + const.LOADBALANCER_ID: self.lb_id, + } + listener = self.mem_listener_client.create_listener(**listener_kwargs) + self.listener_id = listener[const.ID] + self.addCleanup( + self.mem_listener_client.cleanup_listener, + self.listener_id, + lb_client=self.mem_lb_client, lb_id=self.lb_id) + + waiters.wait_for_status(self.mem_lb_client.show_loadbalancer, + self.lb_id, const.PROVISIONING_STATUS, + const.ACTIVE, + CONF.load_balancer.build_interval, + CONF.load_balancer.build_timeout) + + pool_name = data_utils.rand_name("lb_member_pool1_operations") + pool_kwargs = { + const.NAME: pool_name, + const.PROTOCOL: protocol, + const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN, + const.LISTENER_ID: self.listener_id, + } + pool = self.mem_pool_client.create_pool(**pool_kwargs) + self.pool_id = pool[const.ID] + self.addCleanup( + self.mem_pool_client.cleanup_pool, + self.pool_id, + lb_client=self.mem_lb_client, lb_id=self.lb_id) + + waiters.wait_for_status(self.mem_lb_client.show_loadbalancer, + self.lb_id, const.PROVISIONING_STATUS, + const.ACTIVE, + CONF.load_balancer.build_interval, + CONF.load_balancer.build_timeout) + + # Set up Member 1 for Webserver 1 + member1_name = data_utils.rand_name("lb_member_member1-traffic") + member1_kwargs = { + const.POOL_ID: self.pool_id, + const.NAME: member1_name, + const.ADMIN_STATE_UP: True, + const.ADDRESS: self.webserver1_ip, + const.PROTOCOL_PORT: 80, + } + if self.lb_member_1_subnet: + member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID] + + member1 = self.mem_member_client.create_member( + **member1_kwargs) + self.addCleanup( + self.mem_member_client.cleanup_member, + member1[const.ID], pool_id=self.pool_id, + lb_client=self.mem_lb_client, lb_id=self.lb_id) + waiters.wait_for_status( + self.mem_lb_client.show_loadbalancer, self.lb_id, + const.PROVISIONING_STATUS, const.ACTIVE, + CONF.load_balancer.build_interval, + CONF.load_balancer.build_timeout) + + # Set up Member 2 for Webserver 2 + member2_name = data_utils.rand_name("lb_member_member2-traffic") + member2_kwargs = { + const.POOL_ID: self.pool_id, + const.NAME: member2_name, + const.ADMIN_STATE_UP: True, + const.ADDRESS: self.webserver2_ipv6, + const.PROTOCOL_PORT: 80, + } + if self.lb_member_2_ipv6_subnet: + member2_kwargs[const.SUBNET_ID] = ( + self.lb_member_2_ipv6_subnet[const.ID]) + + member2 = self.mem_member_client.create_member( + **member2_kwargs) + self.addCleanup( + self.mem_member_client.cleanup_member, + member2[const.ID], pool_id=self.pool_id, + lb_client=self.mem_lb_client, lb_id=self.lb_id) + + lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer, + self.lb_id, const.PROVISIONING_STATUS, + const.ACTIVE, + CONF.load_balancer.build_interval, + CONF.load_balancer.build_timeout) + return lb + + @classmethod + def _get_vips(cls, lb): + real_vips = [lb[const.VIP_ADDRESS]] + for vip in lb[const.ADDITIONAL_VIPS]: + real_vips.append(vip.get(const.IP_ADDRESS)) + LOG.debug("LB {lb_id} has VIPs: {vips}".format( + lb_id=lb[const.ID], vips=real_vips)) + + # Make floating ips if necessary for ipv4 VIPs + possibly_floating_vips = [] + for vip in real_vips: + vip_obj = ipaddress.ip_address( + vip if isinstance(vip, six.text_type) else six.u(vip)) + if (CONF.validation.connect_method == 'floating' and + vip_obj.version == 4): + port_id = lb[const.VIP_PORT_ID] + result = cls.lb_mem_float_ip_client.create_floatingip( + 
floating_network_id=CONF.network.public_network_id,
+                    port_id=port_id,
+                    fixed_ip_address=vip)
+                floating_ip = result['floatingip']
+                floating_address = floating_ip['floating_ip_address']
+                LOG.info('Created Floating IP for VIP: {fip}->{vip}'.format(
+                    fip=floating_address, vip=vip))
+                cls.addClassResourceCleanup(
+                    waiters.wait_for_not_found,
+                    cls.lb_mem_float_ip_client.delete_floatingip,
+                    cls.lb_mem_float_ip_client.show_floatingip,
+                    floatingip_id=floating_ip['id'])
+                # Just mask the original VIP with the floating one for return
+                possibly_floating_vips.append(floating_address)
+            else:
+                possibly_floating_vips.append(vip)
+        return possibly_floating_vips
+
+    @decorators.idempotent_id('8df06040-f986-4598-9f82-9e60d48b5ed0')
+    @testtools.skipIf(CONF.load_balancer.test_with_noop,
+                      'Traffic tests will not work in noop mode.')
+    @testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
+                          'Mixed IPv4/IPv6 traffic test requires IPv6.')
+    def test_ipv4_with_additional_ipv6(self):
+        """Tests sending traffic through all of a loadbalancer's multiple VIPs
+
+        * Set up a LB with multiple VIPs (IPv4 primary; IPv4/IPv6 additional).
+        * Test traffic to ensure it is balanced properly.
+        """
+        vip_data = {
+            const.VIP_SUBNET_ID: self.lb_member_vip_subnet[const.ID],
+            const.ADDITIONAL_VIPS: [
+                {const.SUBNET_ID: self.lb_member_vip_2_subnet[const.ID]},
+                {const.SUBNET_ID: self.lb_member_vip_2_ipv6_subnet[const.ID]}
+            ]
+        }
+        lb = self._make_lb(vip_data)
+        vips = self._get_vips(lb)
+
+        LOG.debug("Running traffic tests on VIPs: %s", vips)
+        for vip in vips:
+            LOG.debug("Check traffic on VIP %s", vip)
+            self.check_members_balanced(vip)
+
+    @decorators.idempotent_id('b4b5e85b-a62c-4940-9175-ed2c747b529c')
+    @testtools.skipIf(CONF.load_balancer.test_with_noop,
+                      'Traffic tests will not work in noop mode.')
+    @testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
+                          'Mixed IPv4/IPv6 traffic test requires IPv6.')
+    def test_ipv6_with_additional_ipv4(self):
+        """Tests sending traffic through all of a loadbalancer's multiple VIPs
+
+        * Set up a LB with multiple VIPs (IPv6 primary; IPv4/IPv6 additional).
+        * Test traffic to ensure it is balanced properly.
+ """ + vip_data = { + const.VIP_SUBNET_ID: self.lb_member_vip_ipv6_subnet[const.ID], + const.ADDITIONAL_VIPS: [ + {const.SUBNET_ID: self.lb_member_vip_2_subnet[const.ID]}, + {const.SUBNET_ID: self.lb_member_vip_2_ipv6_subnet[const.ID]}, + ] + } + lb = self._make_lb(vip_data) + vips = self._get_vips(lb) + + for vip in vips: + LOG.debug("Check traffic on VIP %s", vip) + self.check_members_balanced(vip) diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py index b0bd643b..e67faa18 100644 --- a/octavia_tempest_plugin/tests/test_base.py +++ b/octavia_tempest_plugin/tests/test_base.py @@ -238,6 +238,7 @@ class LoadBalancerBaseTest(validators.ValidatorsMixin, if CONF.load_balancer.test_with_noop: cls.lb_member_vip_net = {'id': uuidutils.generate_uuid()} cls.lb_member_vip_subnet = {'id': uuidutils.generate_uuid()} + cls.lb_member_vip_2_subnet = {'id': uuidutils.generate_uuid()} cls.lb_member_1_net = {'id': uuidutils.generate_uuid()} cls.lb_member_1_subnet = {'id': uuidutils.generate_uuid()} cls.lb_member_2_net = {'id': uuidutils.generate_uuid()} @@ -246,6 +247,8 @@ class LoadBalancerBaseTest(validators.ValidatorsMixin, cls.lb_member_vip_ipv6_net = {'id': uuidutils.generate_uuid()} cls.lb_member_vip_ipv6_subnet = {'id': uuidutils.generate_uuid()} + cls.lb_member_vip_2_ipv6_subnet = {'id': + uuidutils.generate_uuid()} cls.lb_member_1_ipv6_subnet = {'id': uuidutils.generate_uuid()} cls.lb_member_2_ipv6_subnet = {'id': uuidutils.generate_uuid()} cls.lb_member_vip_ipv6_subnet_stateful = True @@ -262,6 +265,7 @@ class LoadBalancerBaseTest(validators.ValidatorsMixin, cls.lb_member_vip_net = override_network cls.lb_member_vip_subnet = override_subnet + cls.lb_member_vip_2_subnet = override_subnet cls.lb_member_1_net = override_network cls.lb_member_1_subnet = override_subnet cls.lb_member_2_net = override_network @@ -272,6 +276,7 @@ class LoadBalancerBaseTest(validators.ValidatorsMixin, override_ipv6_subnet = show_subnet( conf_lb.test_IPv6_subnet_override) cls.lb_member_vip_ipv6_subnet = override_ipv6_subnet + cls.lb_member_vip_2_ipv6_subnet = override_ipv6_subnet cls.lb_member_1_ipv6_subnet = override_ipv6_subnet cls.lb_member_2_ipv6_subnet = override_ipv6_subnet cls.lb_member_vip_ipv6_subnet_stateful = False @@ -280,6 +285,7 @@ class LoadBalancerBaseTest(validators.ValidatorsMixin, cls.lb_member_vip_ipv6_subnet_stateful = True else: cls.lb_member_vip_ipv6_subnet = None + cls.lb_member_vip_2_ipv6_subnet = None cls.lb_member_1_ipv6_subnet = None cls.lb_member_2_ipv6_subnet = None else: @@ -287,29 +293,34 @@ class LoadBalancerBaseTest(validators.ValidatorsMixin, LOG.debug('Octavia Setup: lb_member_vip_net = {}'.format( cls.lb_member_vip_net[const.ID])) + LOG.debug('Octavia Setup: lb_member_1_net = {}'.format( + cls.lb_member_1_net[const.ID])) + LOG.debug('Octavia Setup: lb_member_2_net = {}'.format( + cls.lb_member_2_net[const.ID])) if cls.lb_member_vip_subnet: LOG.debug('Octavia Setup: lb_member_vip_subnet = {}'.format( cls.lb_member_vip_subnet[const.ID])) - LOG.debug('Octavia Setup: lb_member_1_net = {}'.format( - cls.lb_member_1_net[const.ID])) + if cls.lb_member_vip_2_subnet: + LOG.debug('Octavia Setup: lb_member_vip_2_subnet = {}'.format( + cls.lb_member_vip_2_subnet[const.ID])) if cls.lb_member_1_subnet: LOG.debug('Octavia Setup: lb_member_1_subnet = {}'.format( cls.lb_member_1_subnet[const.ID])) - LOG.debug('Octavia Setup: lb_member_2_net = {}'.format( - cls.lb_member_2_net[const.ID])) if cls.lb_member_2_subnet: LOG.debug('Octavia Setup: 
lb_member_2_subnet = {}'.format( cls.lb_member_2_subnet[const.ID])) - if CONF.load_balancer.test_with_ipv6: - if cls.lb_member_vip_ipv6_subnet: - LOG.debug('Octavia Setup: lb_member_vip_ipv6_subnet = ' - '{}'.format(cls.lb_member_vip_ipv6_subnet[const.ID])) - if cls.lb_member_1_ipv6_subnet: - LOG.debug('Octavia Setup: lb_member_1_ipv6_subnet = {}'.format( - cls.lb_member_1_ipv6_subnet[const.ID])) - if cls.lb_member_2_ipv6_subnet: - LOG.debug('Octavia Setup: lb_member_2_ipv6_subnet = {}'.format( - cls.lb_member_2_ipv6_subnet[const.ID])) + if cls.lb_member_vip_ipv6_subnet: + LOG.debug('Octavia Setup: lb_member_vip_ipv6_subnet = ' + '{}'.format(cls.lb_member_vip_ipv6_subnet[const.ID])) + if cls.lb_member_vip_2_ipv6_subnet: + LOG.debug('Octavia Setup: lb_member_vip_2_ipv6_subnet = ' + '{}'.format(cls.lb_member_vip_2_ipv6_subnet[const.ID])) + if cls.lb_member_1_ipv6_subnet: + LOG.debug('Octavia Setup: lb_member_1_ipv6_subnet = {}'.format( + cls.lb_member_1_ipv6_subnet[const.ID])) + if cls.lb_member_2_ipv6_subnet: + LOG.debug('Octavia Setup: lb_member_2_ipv6_subnet = {}'.format( + cls.lb_member_2_ipv6_subnet[const.ID])) @classmethod # Neutron can be slow to clean up ports from the subnets/networks. @@ -355,6 +366,8 @@ class LoadBalancerBaseTest(validators.ValidatorsMixin, cls.lb_member_vip_net cls.lb_member_vip_subnet cls.lb_member_vip_ipv6_subnet (optional) + cls.lb_member_vip_2_subnet + cls.lb_member_vip_2_ipv6_subnet (optional) cls.lb_member_1_net cls.lb_member_1_subnet cls.lb_member_1_ipv6_subnet (optional) @@ -393,6 +406,22 @@ class LoadBalancerBaseTest(validators.ValidatorsMixin, cls.lb_mem_subnet_client.show_subnet, cls.lb_member_vip_subnet['id']) + # Create tenant VIP2 subnet + subnet_kwargs = { + 'name': data_utils.rand_name("lb_member_vip_2_subnet"), + 'network_id': cls.lb_member_vip_net['id'], + 'cidr': CONF.load_balancer.vip_2_subnet_cidr, + 'ip_version': 4} + result = cls.lb_mem_subnet_client.create_subnet(**subnet_kwargs) + cls.lb_member_vip_2_subnet = result['subnet'] + LOG.info('lb_member_vip_2_subnet: {}'.format( + cls.lb_member_vip_2_subnet)) + cls.addClassResourceCleanup( + waiters.wait_for_not_found, + cls._logging_delete_subnet, + cls.lb_mem_subnet_client.show_subnet, + cls.lb_member_vip_2_subnet['id']) + # Create tenant VIP IPv6 subnet if CONF.load_balancer.test_with_ipv6: cls.lb_member_vip_ipv6_subnet_stateful = False @@ -431,6 +460,34 @@ class LoadBalancerBaseTest(validators.ValidatorsMixin, LOG.info('lb_member_vip_ipv6_subnet: {}'.format( cls.lb_member_vip_ipv6_subnet)) + subnet_kwargs = { + 'name': data_utils.rand_name("lb_member_vip_2_ipv6_subnet"), + 'network_id': cls.lb_member_vip_net['id'], + 'ip_version': 6} + + if subnetpool_name: + subnetpool = cls.os_admin_subnetpools_client.list_subnetpools( + name=subnetpool_name)['subnetpools'] + if len(subnetpool) == 1: + subnetpool = subnetpool[0] + subnet_kwargs['subnetpool_id'] = subnetpool['id'] + + if 'subnetpool_id' not in subnet_kwargs: + subnet_kwargs['cidr'] = ( + CONF.load_balancer.vip_2_ipv6_subnet_cidr) + + result = cls.lb_mem_subnet_client.create_subnet( + **subnet_kwargs) + cls.lb_member_vip_2_ipv6_subnet = result['subnet'] + cls.addClassResourceCleanup( + waiters.wait_for_not_found, + cls._logging_delete_subnet, + cls.lb_mem_subnet_client.show_subnet, + cls.lb_member_vip_2_ipv6_subnet['id']) + + LOG.info('lb_member_vip_2_ipv6_subnet: {}'.format( + cls.lb_member_vip_2_ipv6_subnet)) + # Create tenant member 1 network network_kwargs = { 'name': data_utils.rand_name("lb_member_1_network")} @@ -816,6 +873,17 @@ 
class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest): cls.lb_member_router['id'], subnet_id=cls.lb_member_vip_subnet['id']) + # Add VIP2 subnet to router + cls.lb_mem_routers_client.add_router_interface( + cls.lb_member_router['id'], + subnet_id=cls.lb_member_vip_2_subnet['id']) + cls.addClassResourceCleanup( + waiters.wait_for_not_found, + cls.lb_mem_routers_client.remove_router_interface, + cls.lb_mem_routers_client.remove_router_interface, + cls.lb_member_router['id'], + subnet_id=cls.lb_member_vip_2_subnet['id']) + if (CONF.load_balancer.test_with_ipv6 and CONF.load_balancer.default_router and cls.lb_member_vip_ipv6_subnet_use_subnetpool): @@ -840,6 +908,17 @@ class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest): router['id'], subnet_id=cls.lb_member_vip_ipv6_subnet['id']) + # Add IPv6 VIP2 subnet to router1 + cls.os_admin_routers_client.add_router_interface( + router['id'], + subnet_id=cls.lb_member_vip_2_ipv6_subnet['id']) + cls.addClassResourceCleanup( + waiters.wait_for_not_found, + cls.os_admin_routers_client.remove_router_interface, + cls.os_admin_routers_client.remove_router_interface, + router['id'], + subnet_id=cls.lb_member_vip_2_ipv6_subnet['id']) + # Add member subnet 1 to router cls.lb_mem_routers_client.add_router_interface( cls.lb_member_router['id'], diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml index eb5a593d..f60fd382 100644 --- a/zuul.d/jobs.yaml +++ b/zuul.d/jobs.yaml @@ -953,6 +953,18 @@ nodeset: octavia-single-node-ubuntu-bionic override-checkout: stable/train +- job: + name: octavia-v2-multivip-dsvm-scenario + parent: octavia-v2-dsvm-scenario + vars: + tempest_test_regex: ^octavia_tempest_plugin.tests.multivip_scenario.v2 + +- job: + name: octavia-v2-act-stdby-multivip-dsvm-scenario + parent: octavia-v2-act-stdby-dsvm-scenario + vars: + tempest_test_regex: ^octavia_tempest_plugin.tests.multivip_scenario.v2 + ######### Third party jobs ########## - job: diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index 02d3ac71..3a86f5fe 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -24,6 +24,8 @@ - octavia-v2-dsvm-tls-barbican-stable-victoria - octavia-v2-dsvm-tls-barbican-stable-ussuri - octavia-v2-dsvm-tls-barbican-stable-train + - octavia-v2-multivip-dsvm-scenario + - octavia-v2-act-stdby-multivip-dsvm-scenario - octavia-v2-dsvm-scenario-ipv6-only: voting: false - octavia-v2-dsvm-scenario-centos-8:
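The two new jobs inherit their parents' devstack configuration and only narrow the test selection via tempest_test_regex. On a devstack host with this plugin installed, the same selection can be run locally with:

    tempest run --regex '^octavia_tempest_plugin.tests.multivip_scenario.v2'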