Merge "Add test_vrrp"

Zuul 2024-03-06 19:49:04 +00:00 committed by Gerrit Code Review
commit dc5aa0a129
2 changed files with 312 additions and 2 deletions

View File

@@ -1,3 +1,44 @@
if [[ "$1" == "stack" ]] && [[ "$2" == "install" ]]; then
customize_advanced_image(){
# Here we customize an advanced image to make it suitable for the plugin tests.
# Note: the advanced image was downloaded and set by neutron_tempest_plugin.
# However we can't rely on neutron_tempest_plugin capabilities for customizing
# the image since it expects a debian/ubuntu based image which does not fit well
# to this plugin tests.
# This code modifies the downloaded image by adding packages required by this
# plugin, uploads the image to glance and if all passed successfully it updates
# tempest.conf with the new image reference instead of the original one.
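    # guestfs-tools ships the virt-customize tool used below, which installs
    # packages into the image without having to boot it.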
    sudo dnf install guestfs-tools -y
    for image_url in ${IMAGE_URLS//,/ }; do
        if [[ $image_url =~ $ADVANCED_IMAGE_NAME ]]; then
            image_file=$(basename $image_url)
            break
        fi
    done
    if [ -n "$image_file" ] && [ -s "$TOP_DIR/files/$image_file" ]; then
        cp -f $TOP_DIR/files/$image_file /tmp
        image_file_custom=/tmp/$image_file
        timeout 150 virt-customize -a $image_file_custom --install nmap,python3,keepalived,iperf3 --selinux-relabel
        if [ "$?" == "0" ]; then
            source $TOP_DIR/openrc admin
            old_image_id=$(openstack image show $ADVANCED_IMAGE_NAME -c id -f value)
            new_image_id=$(openstack image create --disk-format qcow2 --container-format bare --public $ADVANCED_IMAGE_NAME --file $image_file_custom -c id -f value)
            if [ -n "$new_image_id" ]; then
                iniset $TEMPEST_CONFIG neutron_plugin_options advanced_image_ref $new_image_id
                openstack image delete $old_image_id
            fi
        fi
    fi
}

if [[ "$1" == "stack" ]]; then
    case "$2" in
        install)
            if [[ "$INSTALL_TEMPEST" == "True" ]]; then
                echo "tempest ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/99_tempest
            fi
            ;;
        test-config)
            customize_advanced_image
            ;;
    esac
fi

View File

@@ -0,0 +1,269 @@
# Copyright 2024 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib import constants
from oslo_log import log
from tempest.common import compute
from tempest.common import utils
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators

from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin.common import utils as common_utils
from neutron_tempest_plugin import config
from neutron_tempest_plugin import exceptions
from neutron_tempest_plugin.scenario import base


CONF = config.CONF
LOG = log.getLogger(__name__)
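
# Both VMs are provisioned with the identical keepalived configuration below
# (same priority, both starting as MASTER), so the initial master is chosen
# by the VRRP tie-break rule: the instance with the higher primary IP
# address wins the election and holds the VIP.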

keepalived_config_template = """vrrp_instance VIP_1 {
    state MASTER
    interface %s
    virtual_router_id 51
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass secretpass
    }
    virtual_ipaddress {
        %s
    }
}
"""


def get_keepalived_config(interface, vip_ip):
    return keepalived_config_template % (interface, vip_ip)


class VrrpTest(base.BaseTempestTestCase):
    credentials = ['primary', 'admin']

    @classmethod
    def skip_checks(cls):
        super(VrrpTest, cls).skip_checks()
        if CONF.neutron_plugin_options.default_image_is_advanced:
            cls.flavor_ref = CONF.compute.flavor_ref
            cls.image_ref = CONF.compute.image_ref
            cls.username = CONF.validation.image_ssh_user
        else:
            cls.flavor_ref = \
                CONF.neutron_plugin_options.advanced_image_flavor_ref
            cls.image_ref = CONF.neutron_plugin_options.advanced_image_ref
            cls.username = CONF.neutron_plugin_options.advanced_image_ssh_user
        if (not cls.flavor_ref) or (not cls.image_ref):
            raise cls.skipException(
                'No advanced image/flavor available for these tests')

    @classmethod
    @utils.requires_ext(extension="router", service="network")
    def resource_setup(cls):
        super(VrrpTest, cls).resource_setup()
        # Create keypair with admin privileges
        cls.keypair = cls.create_keypair()
        # Create security group with admin privileges
        cls.secgroup = cls.create_security_group(
            name=data_utils.rand_name('secgroup'))
        # Execute funcs to achieve ssh, ICMP and VRRP capabilities
        cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
        cls.create_pingable_secgroup_rule(secgroup_id=cls.secgroup['id'])
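        # VRRP advertisements use their own IP protocol (number 112), which
        # the ssh and ICMP rules above do not cover; allow it so the two VMs
        # can see each other's keepalived announcements.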
        cls.create_security_group_rule(security_group_id=cls.secgroup['id'],
                                       protocol=constants.PROTO_NAME_VRRP,
                                       direction=constants.INGRESS_DIRECTION)

    def _create_server(self, port, name=None, scheduler_hints=None):
        if not name:
            name = data_utils.rand_name('vm')
        params = {
            'flavor_ref': self.flavor_ref,
            'image_ref': self.image_ref,
            'key_name': self.keypair['name'],
            'name': name
        }
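        # The different_host hint is honored only when nova's
        # DifferentHostFilter is enabled; pass it only when it can actually
        # be satisfied, i.e. on a deployment with more than one compute node.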
        if (scheduler_hints and CONF.compute.min_compute_nodes > 1 and
                compute.is_scheduler_filter_enabled("DifferentHostFilter")):
            params['scheduler_hints'] = scheduler_hints
        vm = self.create_server(networks=[{'port': port['id']}], **params)
        vm['server']['name'] = name
        return vm

    def _get_vm_host(self, server_id):
        server_details = self.os_admin.servers_client.show_server(server_id)
        return server_details['server']['OS-EXT-SRV-ATTR:host']

    def _check_keepalived_on_server(self, ssh_client, server_id):
        try:
            ssh_client.execute_script('PATH=$PATH:/usr/sbin which keepalived')
        except exceptions.SSHScriptFailed:
            raise self.skipException(
                "keepalived is not available on server %s" % (server_id))

    @staticmethod
    def _is_keepalived_service_active(ssh_client):
        output = ssh_client.exec_command("sudo systemctl is-active keepalived")
        return 'active' == output.splitlines()[0]
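
    # Push the rendered keepalived config to the guest and restart the
    # service. Note the mv -Z, which resets the file's SELinux context to
    # the destination default so keepalived is allowed to read it.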
    def _prepare_server(self, ssh_client, interface, vip_ip):
        config_text = get_keepalived_config(interface, vip_ip)
        config_file = 'keepalived.conf'
        ssh_client.execute_script(
            'echo "{0}" > /tmp/{1};'
            'sudo mv -Z /tmp/{1} /etc/keepalived/'.format(
                config_text, config_file))
        ssh_client.exec_command("sudo systemctl restart keepalived")
        # make sure keepalived is active
        common_utils.wait_until_true(
            lambda: self._is_keepalived_service_active(ssh_client=ssh_client),
            timeout=20,
            exception=RuntimeError(
                "Timed out waiting for keepalived to become active"))

    @staticmethod
    def _get_vm_id_by_name(name, vms):
        for vm in vms:
            if vm['server']['name'] == name:
                return vm['server']['id']
        return None

    def _get_client(self, ip_address, proxy_client=None):
        return ssh.Client(ip_address,
                          self.username,
                          pkey=self.keypair['private_key'],
                          proxy_client=proxy_client)

    @decorators.idempotent_id('f88ca220-eea2-48d2-9cac-3f382908cb37')
    def test_vrrp_vip_failover(self):
        """Verify traffic flow during a VRRP VIP failover.

        The aim of the test is to validate that, in case the master VM
        becomes unavailable, traffic to the VIP is directed to the
        second VM.

        Recommended topology: a controller node plus at least 2 compute
        nodes.

        Scenario:
        - Create a port for the VRRP VIP and ports for the VMs with an
          allowed address pair configured to the VIP IP address
        - Attach a FIP to each one of these ports, including the VIP. We will
          differentiate between the private VIP and the public (FIP) VIP
        - Create two VMs on different compute nodes
        - Set up VRRP between the VMs using keepalived
        - Create a proxy VM with a normal port and a FIP. This VM is neither
          part of the VRRP VIP configuration, nor is keepalived installed
          on it
        - Test traffic to the public VIP (login via ssh)
        - Test traffic to the private VIP through the proxy VM
        - Kill the active VM
        - Test traffic to the public VIP. Traffic should now flow to the
          second VM
        - Test traffic to the private VIP through the proxy VM. Traffic
          should now flow to the second VM
        """
        network = self.create_network()
        subnet = self.create_subnet(network, cidr="192.168.100.0/24")
        router = self.create_router_by_client()
        self.create_router_interface(router['id'], subnet['id'])
        ports = {'vip': {}, 'vm1': {}, 'vm2': {}}
        ports['vip']['port'] = self.create_port(
            network=network)
        vip_ip = ports['vip']['port']['fixed_ips'][0]['ip_address']
        vm_names = ['vm1', 'vm2']
        for vm in vm_names:
            ports[vm]['port'] = self.create_port(
                network=network, security_groups=[self.secgroup['id']],
                allowed_address_pairs=[{"ip_address": vip_ip}])
        for key in ports.keys():
            ports[key]['fip'] = self.create_floatingip(
                port=ports[key]['port'])
        vm1 = self._create_server(port=ports['vm1']['port'], name='vm1')
        vm2 = self._create_server(
            port=ports['vm2']['port'], name='vm2',
            scheduler_hints={'different_host': vm1['server']['id']})
        vms = [vm1, vm2]
        if (self._get_vm_host(vm1['server']['id']) ==
                self._get_vm_host(vm2['server']['id']) and
                CONF.compute.min_compute_nodes > 1):
            raise self.skipException(
                "VMs are running on the same host. "
                "Make sure you have DifferentHostFilter enabled in nova.conf "
                "in order to cover the multi-node scenario properly.")
        for vm in vm_names:
            ports[vm]['client'] = ssh.Client(
                ports[vm]['fip']['floating_ip_address'],
                self.username,
                pkey=self.keypair['private_key'])
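            # Derive the guest's primary interface name from its default
            # route; keepalived needs the interface name for the
            # vrrp_instance definition.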
            interface = ports[vm]['client'].exec_command(
                "PATH=$PATH:/usr/sbin ip route get default 8.8.8.8 | "
                "head -1 | cut -d ' ' -f 5").rstrip()
            self._check_keepalived_on_server(ports[vm]['client'], vm)
            self._prepare_server(ports[vm]['client'], interface, vip_ip)

        # create proxy vm
        port_vm_proxy = self.create_port(network=network,
                                         security_groups=[self.secgroup['id']])
        self._create_server(port=port_vm_proxy, name='vm_proxy')
        fip_vm_proxy = self.create_floatingip(port=port_vm_proxy)
        proxy_client = ssh.Client(fip_vm_proxy['floating_ip_address'],
                                  self.username,
                                  pkey=self.keypair['private_key'])

        # verify public VIP connectivity
        ports['vip']['client'] = self._get_client(
            ports['vip']['fip']['floating_ip_address'])
        master_host = ports['vip']['client'].exec_command(
            'hostname').rstrip()
        LOG.debug('(obtained from public VIP) master_host = %s', master_host)

        # verify private VIP connectivity
        private_vip_client = self._get_client(
            vip_ip, proxy_client=proxy_client)
        master_host_private = private_vip_client.exec_command(
            'hostname').rstrip()
        LOG.debug('(obtained from private VIP) master_host = %s',
                  master_host_private)
        self.assertEqual(master_host, master_host_private)

        LOG.debug('Stopping master host')
        master_host_id = self._get_vm_id_by_name(master_host, vms)
        self.os_primary.servers_client.stop_server(master_host_id)
        waiters.wait_for_server_status(self.os_primary.servers_client,
                                       master_host_id, 'SHUTOFF')

        # verify public VIP connectivity
        ports['vip']['client'] = self._get_client(
            ports['vip']['fip']['floating_ip_address'])
        new_master_host = ports['vip']['client'].exec_command(
            'hostname').rstrip()
        LOG.debug('(obtained from public VIP) new_master_host = %s',
                  new_master_host)
        self.assertNotEqual(master_host, new_master_host)

        # verify private VIP connectivity
        private_vip_client = self._get_client(
            vip_ip, proxy_client=proxy_client)
        new_master_host_private = private_vip_client.exec_command(
            'hostname').rstrip()
        LOG.debug('(obtained from private VIP) new_master_host = %s',
                  new_master_host_private)
        self.assertEqual(new_master_host, new_master_host_private)