From 70f1b9c3717359f4f2cbbe410c8e10c710983112 Mon Sep 17 00:00:00 2001
From: Carlos Goncalves
Date: Thu, 14 Feb 2019 23:57:44 +0100
Subject: [PATCH] Add iptables-based active/standby scenario test

This patch adds a tempest scenario test for the active/standby
topology. The scenario takes a similar approach to the one proposed in
Change-Id Ibcd5552a67cea650edc72bfaa986357267ca2407, with the
difference that it does not rely on the amphora stats API. Instead, it
uses iptables to log VIP traffic and makes assertions based on that
logging.

Co-Authored-By: Michael Johnson
Change-Id: I24a6fc3387166ec6cdbd57a5ca9f63743748ec68
---
 octavia_tempest_plugin/common/constants.py    |   1 +
 octavia_tempest_plugin/config.py              |   9 +-
 .../tests/act_stdby_scenario/__init__.py      |   0
 .../tests/act_stdby_scenario/v2/__init__.py   |   0
 .../v2/test_active_standby_iptables.py        | 302 ++++++++++++++++++
 octavia_tempest_plugin/tests/test_base.py     |   1 +
 playbooks/act_stby_iptables/pre.yaml          |  10 +
 zuul.d/jobs.yaml                              |  57 ++++
 zuul.d/projects.yaml                          |  10 +
 9 files changed, 389 insertions(+), 1 deletion(-)
 create mode 100644 octavia_tempest_plugin/tests/act_stdby_scenario/__init__.py
 create mode 100644 octavia_tempest_plugin/tests/act_stdby_scenario/v2/__init__.py
 create mode 100644 octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
 create mode 100644 playbooks/act_stby_iptables/pre.yaml

diff --git a/octavia_tempest_plugin/common/constants.py b/octavia_tempest_plugin/common/constants.py
index 0122424e..54b23d11 100644
--- a/octavia_tempest_plugin/common/constants.py
+++ b/octavia_tempest_plugin/common/constants.py
@@ -84,6 +84,7 @@ FLAVOR_PROFILE_ID = 'flavor_profile_id'
 
 # Other constants
 ACTIVE = 'ACTIVE'
+PENDING_UPDATE = 'PENDING_UPDATE'
 ADMIN_STATE_UP_TRUE = 'true'
 ASC = 'asc'
 DELETED = 'DELETED'
diff --git a/octavia_tempest_plugin/config.py b/octavia_tempest_plugin/config.py
index 91aa2c48..c925df39 100644
--- a/octavia_tempest_plugin/config.py
+++ b/octavia_tempest_plugin/config.py
@@ -164,8 +164,15 @@ OctaviaGroup = [
     cfg.StrOpt('member_2_ipv6_subnet_cidr',
                default='fd77:1457:4cf0:26a8::/64',
                help='CIDR format subnet to use for the member 1 ipv6 subnet.'),
+    # Amphora specific options
+    cfg.StrOpt('amphora_ssh_user',
+               default='ubuntu',
+               help='The amphora SSH user.'),
+    cfg.StrOpt('amphora_ssh_key',
+               default='/tmp/octavia_ssh_key',
+               help='The amphora SSH key file.'),
     # Environment specific options
-    # These are used to accomidate clouds with specific limitations
+    # These are used to accommodate clouds with specific limitations
     cfg.IntOpt('random_server_name_length',
                default=0,
                help='If non-zero, generate a random name of the length '
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/__init__.py b/octavia_tempest_plugin/tests/act_stdby_scenario/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/__init__.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
new file mode 100644
index 00000000..0ae9ee61
--- /dev/null
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
@@ -0,0 +1,302 @@
+# Copyright 2019 Rackspace US Inc. All rights reserved.
+# Copyright 2019 Red Hat Inc. All rights reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import testtools + +from oslo_log import log as logging +from tempest import config +from tempest.lib.common.utils import data_utils +from tempest.lib.common.utils.linux import remote_client +from tempest.lib import decorators +from tempest.lib import exceptions + +from octavia_tempest_plugin.common import constants as const +from octavia_tempest_plugin.tests import test_base +from octavia_tempest_plugin.tests import waiters + +CONF = config.CONF +LOG = logging.getLogger(__name__) + + +@testtools.skipUnless( + CONF.validation.run_validation, + 'Active-Standby tests will not work without run_validation enabled.') +class ActiveStandbyIptablesScenarioTest( + test_base.LoadBalancerBaseTestWithCompute): + + @classmethod + def skip_checks(cls): + super(ActiveStandbyIptablesScenarioTest, cls).skip_checks() + + if CONF.load_balancer.provider not in ['amphora', 'octavia']: + raise cls.skipException("Amphora tests require provider 'amphora' " + "or 'octavia' (alias to 'amphora', " + "deprecated) set.") + + @classmethod + def resource_setup(cls): + """Setup resources needed by the tests.""" + super(ActiveStandbyIptablesScenarioTest, cls).resource_setup() + + lb_name = data_utils.rand_name("lb_member_lb1_actstdby") + lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider, + const.NAME: lb_name} + + # TODO(rm_work): Make this work with ipv6 and split this test for both + ip_version = 4 + cls._setup_lb_network_kwargs(lb_kwargs, ip_version) + + lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs) + cls.lb_id = lb[const.ID] + cls.addClassResourceCleanup( + cls.mem_lb_client.cleanup_loadbalancer, + cls.lb_id) + + if CONF.validation.connect_method == 'floating': + port_id = lb[const.VIP_PORT_ID] + result = cls.lb_mem_float_ip_client.create_floatingip( + floating_network_id=CONF.network.public_network_id, + port_id=port_id) + floating_ip = result['floatingip'] + LOG.info('lb1_floating_ip: {}'.format(floating_ip)) + cls.addClassResourceCleanup( + waiters.wait_for_not_found, + cls.lb_mem_float_ip_client.delete_floatingip, + cls.lb_mem_float_ip_client.show_floatingip, + floatingip_id=floating_ip['id']) + cls.lb_vip_address = floating_ip['floating_ip_address'] + else: + cls.lb_vip_address = lb[const.VIP_ADDRESS] + + waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, + cls.lb_id, const.PROVISIONING_STATUS, + const.ACTIVE, + CONF.load_balancer.lb_build_interval, + CONF.load_balancer.lb_build_timeout) + + listener_name = data_utils.rand_name("lb_member_listener1_actstdby") + listener_kwargs = { + const.NAME: listener_name, + const.PROTOCOL: const.HTTP, + const.PROTOCOL_PORT: '80', + const.LOADBALANCER_ID: cls.lb_id, + } + listener = cls.mem_listener_client.create_listener(**listener_kwargs) + cls.listener_id = listener[const.ID] + cls.addClassResourceCleanup( + cls.mem_listener_client.cleanup_listener, + cls.listener_id, + lb_client=cls.mem_lb_client, lb_id=cls.lb_id) + + waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, + 
cls.lb_id, const.PROVISIONING_STATUS, + const.ACTIVE, + CONF.load_balancer.build_interval, + CONF.load_balancer.build_timeout) + + pool_name = data_utils.rand_name("lb_member_pool1_actstdby") + pool_kwargs = { + const.NAME: pool_name, + const.PROTOCOL: const.HTTP, + const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN, + const.LISTENER_ID: cls.listener_id, + } + pool = cls.mem_pool_client.create_pool(**pool_kwargs) + cls.pool_id = pool[const.ID] + cls.addClassResourceCleanup( + cls.mem_pool_client.cleanup_pool, + cls.pool_id, + lb_client=cls.mem_lb_client, lb_id=cls.lb_id) + + waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, + cls.lb_id, const.PROVISIONING_STATUS, + const.ACTIVE, + CONF.load_balancer.build_interval, + CONF.load_balancer.build_timeout) + + # Set up Member 1 for Webserver 1 + member1_name = data_utils.rand_name("lb_member_member1_actstdby") + member1_kwargs = { + const.POOL_ID: cls.pool_id, + const.NAME: member1_name, + const.ADMIN_STATE_UP: True, + const.ADDRESS: cls.webserver1_ip, + const.PROTOCOL_PORT: 80, + } + if cls.lb_member_1_subnet: + member1_kwargs[const.SUBNET_ID] = cls.lb_member_1_subnet[const.ID] + + member1 = cls.mem_member_client.create_member( + **member1_kwargs) + cls.addClassResourceCleanup( + cls.mem_member_client.cleanup_member, + member1[const.ID], pool_id=cls.pool_id, + lb_client=cls.mem_lb_client, lb_id=cls.lb_id) + waiters.wait_for_status( + cls.mem_lb_client.show_loadbalancer, cls.lb_id, + const.PROVISIONING_STATUS, const.ACTIVE, + CONF.load_balancer.check_interval, + CONF.load_balancer.check_timeout) + + # Set up Member 2 for Webserver 2 + member2_name = data_utils.rand_name("lb_member_member2_actstdby") + member2_kwargs = { + const.POOL_ID: cls.pool_id, + const.NAME: member2_name, + const.ADMIN_STATE_UP: True, + const.ADDRESS: cls.webserver2_ip, + const.PROTOCOL_PORT: 80, + } + if cls.lb_member_2_subnet: + member2_kwargs[const.SUBNET_ID] = cls.lb_member_2_subnet[const.ID] + + member2 = cls.mem_member_client.create_member( + **member2_kwargs) + cls.addClassResourceCleanup( + cls.mem_member_client.cleanup_member, + member2[const.ID], pool_id=cls.pool_id, + lb_client=cls.mem_lb_client, lb_id=cls.lb_id) + waiters.wait_for_status( + cls.mem_lb_client.show_loadbalancer, cls.lb_id, + const.PROVISIONING_STATUS, const.ACTIVE, + CONF.load_balancer.check_interval, + CONF.load_balancer.check_timeout) + + @classmethod + def _log_vip_traffic(cls, amp, log_prefix): + ssh_key = cls._get_amphora_ssh_key() + linux_client = remote_client.RemoteClient( + amp['lb_network_ip'], CONF.load_balancer.amphora_ssh_user, + pkey=ssh_key) + linux_client.validate_authentication() + + # Allow logging from non-init namespaces + # https://lore.kernel.org/patchwork/patch/673714/ + linux_client.exec_command('echo 1 | sudo tee ' + '/proc/sys/net/netfilter/nf_log_all_netns') + + linux_client.exec_command('sudo ip netns exec amphora-haproxy ' + 'iptables -I INPUT 1 -d {0} -j LOG ' + '--log-prefix "{1}"' + .format(amp['ha_ip'], log_prefix)) + + @classmethod + def _has_vip_traffic(cls, ip_address, log_prefix): + ssh_key = cls._get_amphora_ssh_key() + linux_client = remote_client.RemoteClient( + ip_address, CONF.load_balancer.amphora_ssh_user, pkey=ssh_key) + linux_client.validate_authentication() + + try: + linux_client.exec_command('sudo journalctl -t kernel | grep {0}' + .format(log_prefix)) + return True + except exceptions.SSHExecCommandFailed: + return False + + @classmethod + def _get_active_standby_amps(cls, amps, log_prefix): + active = None + stby = None + for amp in 
amps:
+            if cls._has_vip_traffic(amp['lb_network_ip'], log_prefix):
+                if active:
+                    LOG.error('Failed to determine single active amphora.')
+                    raise Exception('More than one amphora is forwarding VIP '
+                                    'traffic.')
+                active = amp
+            else:
+                stby = amp
+
+        return active, stby
+
+    @classmethod
+    def _get_amphora_ssh_key(cls):
+        key_file = CONF.load_balancer.amphora_ssh_key
+        if not key_file:
+            raise Exception("SSH key file not provided.")
+        if not os.path.isfile(key_file):
+            raise Exception("Could not find amphora ssh key file {0}."
+                            .format(key_file))
+        with open(key_file, 'r') as f:
+            return f.read()
+
+    @decorators.idempotent_id('deab2b3f-62c7-4a05-9e92-aa45a04773fd')
+    def test_active_standby_vrrp_failover(self):
+        """Tests active/standby VRRP failover
+
+        * Tests the load balancer to make sure it is functioning
+        * Identifies the Master and Backup amphorae
+        * Deletes the Master amphora
+        * Sends traffic through the load balancer
+        * Validates that the Backup has assumed the Master role
+        """
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address)
+
+        # Check there are two amphorae associated with the load balancer
+        amps = self.os_admin.amphora_client.list_amphorae(
+            query_params='{loadbalancer_id}={lb_id}'.format(
+                loadbalancer_id=const.LOADBALANCER_ID, lb_id=self.lb_id))
+        self.assertEqual(2, len(amps))
+
+        # Log VIP traffic
+        for amp in amps:
+            self._log_vip_traffic(amp, 'ACTSTBY-1')
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address)
+
+        # Which amphora is the active one?
+        active, _ = self._get_active_standby_amps(amps, 'ACTSTBY-1')
+
+        # Delete the active amphora
+        self.os_admin_servers_client.delete_server(active[const.COMPUTE_ID])
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address)
+
+        # Wait for the amphora failover to start
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer,
+            self.lb_id, const.PROVISIONING_STATUS,
+            const.PENDING_UPDATE, CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Wait for the load balancer to return to ACTIVE so the
+        # cleanup steps will pass
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer,
+            self.lb_id, const.PROVISIONING_STATUS,
+            const.ACTIVE, CONF.load_balancer.lb_build_interval,
+            CONF.load_balancer.lb_build_timeout)
+
+        # Check again there are two amphorae associated with the load balancer
+        amps = self.os_admin.amphora_client.list_amphorae(
+            query_params='{loadbalancer_id}={lb_id}'.format(
+                loadbalancer_id=const.LOADBALANCER_ID, lb_id=self.lb_id))
+        self.assertEqual(2, len(amps))
+
+        # Log VIP traffic
+        for amp in amps:
+            self._log_vip_traffic(amp, 'ACTSTBY-2')
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address)
+
+        # Ensure only one amphora is handling VIP traffic
+        self._get_active_standby_amps(amps, 'ACTSTBY-2')
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index f8acc2b6..6a9c12b1 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -127,6 +127,7 @@ class LoadBalancerBaseTest(test.BaseTestCase):
         cls.lb_admin_flavor_client = cls.os_roles_lb_admin.flavor_client
         cls.mem_flavor_client = cls.os_roles_lb_member.flavor_client
         cls.mem_provider_client = cls.os_roles_lb_member.provider_client
+        cls.os_admin_servers_client = cls.os_admin.servers_client
 
     @classmethod
     def resource_setup(cls):
diff --git a/playbooks/act_stby_iptables/pre.yaml b/playbooks/act_stby_iptables/pre.yaml
new file mode 100644
index 00000000..694bb3c3
--- /dev/null
+++ b/playbooks/act_stby_iptables/pre.yaml
@@ -0,0 +1,10 @@
+- hosts: all
+  name: Octavia DSVM jobs pre-run playbook
+  tasks:
+    - name: Generate Octavia RSA key
+      shell: ssh-keygen -t rsa -f /tmp/octavia_ssh_key -q -N ""
+    - name: Allow read permissions to other users
+      file:
+        path: /tmp/octavia_ssh_key
+        state: file
+        mode: 0644
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index d53fa42e..9ec72ee0 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -389,3 +389,60 @@
 - job:
     name: octavia-v2-dsvm-scenario-centos-7
     parent: octavia-v2-dsvm-py2-scenario-centos-7
+
+- job:
+    name: octavia-v2-act-stdby-iptables-dsvm-scenario
+    parent: octavia-dsvm-live-base
+    pre-run: playbooks/act_stby_iptables/pre.yaml
+    vars:
+      devstack_localrc:
+        OCTAVIA_USE_PREGENERATED_SSH_KEY: True
+        OCTAVIA_PREGENERATED_SSH_KEY_PATH: /tmp/octavia_ssh_key
+      devstack_local_conf:
+        post-config:
+          $OCTAVIA_CONF:
+            api_settings:
+              api_v1_enabled: False
+            controller_worker:
+              loadbalancer_topology: ACTIVE_STANDBY
+            task_flow:
+              engine: parallel
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              check_timeout: 240
+      tempest_test_regex: ^octavia_tempest_plugin.tests.act_stdby_scenario.v2.test_active_standby_iptables
+      tox_envlist: all
+
+- job:
+    name: octavia-v2-act-stdby-iptables-dsvm-py2-scenario
+    parent: octavia-v2-act-stdby-iptables-dsvm-scenario
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+
+- job:
+    name: octavia-v2-act-stdby-iptables-dsvm-py2-scenario-centos-7
+    parent: octavia-v2-act-stdby-iptables-dsvm-py2-scenario
+    nodeset: devstack-single-node-centos-7
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+        OCTAVIA_AMP_BASE_OS: centos
+        OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: 7
+        OCTAVIA_AMP_IMAGE_SIZE: 3
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              amphora_ssh_user: centos
+
+- job:
+    name: octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-rocky
+    parent: octavia-v2-act-stdby-iptables-dsvm-py2-scenario
+    override-checkout: stable/rocky
+
+- job:
+    name: octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-queens
+    parent: octavia-v2-act-stdby-iptables-dsvm-py2-scenario
+    override-checkout: stable/queens
diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml
index 07ec7ff0..07e81b54 100644
--- a/zuul.d/projects.yaml
+++ b/zuul.d/projects.yaml
@@ -7,24 +7,34 @@
       - tempest-plugin-jobs
     check:
       jobs:
         - octavia-v2-dsvm-noop-api
         - octavia-v2-dsvm-noop-py2-api
         - octavia-v2-dsvm-noop-py2-api-stable-rocky
         - octavia-v2-dsvm-noop-py2-api-stable-queens
         - octavia-v2-dsvm-scenario
         - octavia-v2-dsvm-py2-scenario
         - octavia-v2-dsvm-py2-scenario-stable-rocky
         - octavia-v2-dsvm-py2-scenario-stable-queens
         - octavia-v2-dsvm-scenario-centos-7:
             voting: false
         - octavia-v2-dsvm-scenario-ubuntu-bionic:
             voting: false
         - octavia-v2-dsvm-scenario-two-node:
             voting: false
         - octavia-v2-dsvm-py2-scenario-two-node:
             voting: false
+        - octavia-v2-act-stdby-iptables-dsvm-scenario:
+            voting: false
+        - octavia-v2-act-stdby-iptables-dsvm-py2-scenario:
+            voting: false
+        - octavia-v2-act-stdby-iptables-dsvm-py2-scenario-centos-7:
+            voting: false
+        - octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-rocky:
+            voting: false
+        - octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-queens:
+            voting: false
         - octavia-v2-dsvm-tls-barbican:
             voting: false
     gate:
       queue: octavia
       jobs:
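
Note for reviewers: the log-based assertion this test relies on can be
reproduced by hand on a running amphora. Below is a minimal sketch using
the same commands the test issues over SSH; the management IP, VIP
address, and log prefix are illustrative placeholders, not values taken
from the patch.

    # Log in over the management network with the key from pre.yaml
    ssh -i /tmp/octavia_ssh_key ubuntu@<amphora lb_network_ip>

    # Let iptables LOG targets emit from non-init network namespaces
    echo 1 | sudo tee /proc/sys/net/netfilter/nf_log_all_netns

    # Log all traffic addressed to the VIP inside the amphora-haproxy
    # namespace
    sudo ip netns exec amphora-haproxy iptables -I INPUT 1 \
        -d <VIP address> -j LOG --log-prefix "ACTSTBY-TEST"

    # After driving traffic through the VIP, only the MASTER amphora
    # should show matching kernel log entries
    sudo journalctl -t kernel | grep ACTSTBY-TEST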