[robot] Add libraries and resources used by the suite

Add a series of libraries and resources used by the suite setup
and by the test case functionality.

-  Libraries - libraries written in Python, mostly serving the
   installation and deployment of StarlingX from robot test cases.
-  Resources - libraries in robot format that are used as a pool of
   keywords by the entire set of test cases.
-  Utils - libraries written in Python that expose functionality to
   configure the framework at the host machine level.
-  Variables - global variables used to set up the framework as well
   as the test cases.

Story: 2004828
Task: 29004

Depends-On: I6ead335412150fb8d64a6abf7909cf702d0d248c
Change-Id: I796dcaf71089424dd37a050691fd0ee003ad3176
Signed-off-by: Jose Perez Carranza <jose.perez.carranza@intel.com>

@ -0,0 +1,29 @@
# Table of contents
- [iso_setup python module](#iso_setup-python-module)
# iso_setup python module
The iso_setup.py module in this folder provides the capability to set up a
StarlingX ISO with a specific configuration; this configuration comes from
the `config.ini` file.
The **config.ini** file contains, in its `iso_installer` section, the
variable `KERNEL_OPTION`, which can take the following values (a minimal
example of reading this value follows the table).
| Value | Description |
| ----- | ------------------------------------------------------------ |
| 0 | Standard Controller Configuration > Serial Console > Standard Security Boot Profile |
| S0 | Standard Controller Configuration > Serial Console > Extended Security Boot Profile |
| 1 | Standard Controller Configuration > Graphical Console > Standard Security Boot Profile |
| S1 | Standard Controller Configuration > Graphical Console > Extended Security Boot Profile |
| 2 | All-in-one Controller Configuration > Serial Console > Standard Security Boot Profile |
| S2 | All-in-one Controller Configuration > Serial Console > Extended Security Boot Profile |
| 3 | All-in-one Controller Configuration > Graphical Console > Standard Security Boot Profile |
| S3 | All-in-one Controller Configuration > Graphical Console > Extended Security Boot Profile |
| 4 | All-in-one (lowlatency) Controller Configuration > Serial Console > Standard Security Boot Profile |
| S4 | All-in-one (lowlatency) Controller Configuration > Serial Console > Extended Security Boot Profile |
| 5 | All-in-one (lowlatency) Controller Configuration > Graphical Console > Standard Security Boot Profile |
| S5 | All-in-one (lowlatency) Controller Configuration > Graphical Console > Extended Security Boot Profile |
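
For reference, below is a minimal sketch of how this value can be read through
`configparser`, the same mechanism the framework uses; the `Config/config.ini`
path is an assumption about the checkout layout.

```python
import configparser

# Minimal sketch: read KERNEL_OPTION the same way the suite does.
# 'Config/config.ini' is an assumed path relative to the suite root.
config = configparser.ConfigParser()
config.read('Config/config.ini')
kernel_option = config.get('iso_installer', 'KERNEL_OPTION')
# e.g. '0' selects Standard Controller > Serial Console > Standard Security
print(kernel_option)
```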


@ -0,0 +1,430 @@
"""Provides a library of useful utilities for Robot Framework"""
import os
import configparser
import logging
import yaml
from Utils import bash_utils as bash
from Config import config
# create the logger object
LOG = logging.getLogger(__name__)
def update_config_ini(**kwargs):
"""Update a specific config.ini
This function updates the values of a specific config.ini file
:param kwargs: a dict that contains the following values:
- config_ini: the absolute path to the config.ini (mandatory
variable)
- config_section: the section in which to modify the variables
(optional variable)
- all other variables are dynamic and depend directly on the
existing values in the config.ini, e.g:
*** How to use this function ***
- Example 1:
scenario: the config.ini has a variable that is unique across all sections
update_config_ini(
config_ini='path_to_config.ini', LOG_PATH='some_value')
where LOG_PATH is an existing value in the config.ini.
You can use as many values as you want; the only limitation is that
these must exist in the config.ini file.
- Example 2:
scenario: the config.ini has variables duplicated in several sections
update_config_ini(
config_ini='path_to_config.ini',
config_section='LOGICAL_INTERFACE_2', LOG_PATH='some_value')
where LOGICAL_INTERFACE_2 is an existing section in the config.ini;
please notice that the variables here must exist in the section
specified.
:return
This function returns a tuple with the following values:
- status: this can be one of the following values:
1. True, if some values from config.ini were modified
2. False, if there were no modifications
- message: a message with descriptive information about the
success/error
(A usage sketch showing the returned tuple follows this function.)
"""
status = False
message = None
if len(kwargs) < 2:
raise RuntimeError('a minimum of two variables are expected')
# obligatory variable
config_ini = kwargs['config_ini']
# optional variable
config_section = kwargs.get('config_section', False)
if not os.path.exists(config_ini):
raise IOError('{}: does not exist'.format(config_ini))
configurations = configparser.ConfigParser()
# preserve the variables from config.ini in upper case
configurations.optionxform = lambda option: option.upper()
configurations.read(config_ini)
# ------------------------ validation section ---------------------------
# checking if the section given is valid (if any)
if config_section and config_section not in configurations.sections():
message = '{}: section does not exist'.format(config_section)
return status, message
elif not config_section:
# checking if the values are in more than one section
duplicates = 0
for key, value in kwargs.items():
for section in configurations.sections():
if configurations.has_option(section, key):
duplicates += 1
if duplicates > 1:
status = False
message = ('{}: is in more than one section, please '
'set config_section'.format(key))
return status, message
duplicates = 0
# -----------------------------------------------------------------------
blacklist_keys = ['config_ini', 'config_section']
count = 0
# ------------------- update config values section ----------------------
if config_section:
# (the user provides a config_section)
# get a list of tuples without the values in the blacklist list
values = [x for x in kwargs.items() if x[0] not in blacklist_keys]
for _key, _value in values:
try:
_ = configurations[config_section][_key]
except KeyError:
message = '{}: key does not exist in the section: {}'.format(
_key, config_section)
return status, message
else:
configurations[config_section][_key] = _value
count += 1
status = True
else:
# (the user does not provide a config_section, only values)
# modifying configurations according to the values
for section in configurations.sections():
for item in configurations.items(section):
for key, value in kwargs.items():
if key == item[0]:
configurations[section][item[0]] = value
count += 1
# -----------------------------------------------------------------------
if count != 0:
with open(config_ini, 'w') as configfile:
configurations.write(configfile)
status = True
message = '{}: was updated successfully'.format(os.path.basename(
config_ini))
return status, message
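# Usage sketch; '/path/to/config.ini' is a hypothetical path:
#     status, message = update_config_ini(
#         config_ini='/path/to/config.ini', LOG_PATH='/tmp/logs')
#     if not status:
#         raise RuntimeError(message)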
def string_to_dict(string_table):
"""Convert string table to dictionary
This function converts a string table output from a command executed on
the controller node into a dictionary.
Useful to parse the output into keys/values for Robot Framework.
:param string_table: the string table to convert into a dictionary; it
comes from the controller node through Robot Framework.
:return:
a dictionary with all the string table entries.
(A small input/output sketch follows this function.)
"""
# the string_table variable comes from Robot Framework as a dictionary
# whose 'stdout' entry holds the whole table as a single string, so it
# needs to be split into a list of lines
line_breaks_list = string_table['stdout'].split('\n')
robot_dictionary = {}
try:
# getting the table headers without empty spaces
table_headers = [
header.strip() for header in line_breaks_list[1].split('|')[1:-1]
]
except IndexError:
err_dict = {
'summary': {
'err': 'IndexError',
'cause': 'the command did not return a table'
}
}
robot_dictionary.update(err_dict)
return robot_dictionary
# the blacklist is used to build the body variable without the table header and border rows
blacklist = [0, 1, 2, len(line_breaks_list) - 1]
body = list(filter(
lambda item: line_breaks_list.index(item) not in blacklist,
line_breaks_list))
table_data = [[v.strip() for v in i.strip('|').split('|')] for i in body]
robot_dictionary = {
table_headers[0]: {
i[0]: {
k: v for k, v in zip(table_headers[1:], i[1:])
} for i in table_data
}
}
return robot_dictionary
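# Input/output sketch; the table below is a made-up example of the CLI
# output this function expects (as the 'stdout' value of the dict):
#     +----+--------------+
#     | id | name         |
#     +----+--------------+
#     | 1  | controller-0 |
#     +----+--------------+
# string_to_dict() would then return something like:
#     {'id': {'1': {'name': 'controller-0'}}}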
def get_cmd_boot_line():
"""Get a cmd boot line.
This function builds a custom cmd line in order to boot the StarlingX ISO
:return:
cmd: the cmd line used to boot the ISO.
"""
kernel_option = config.get('iso_installer', 'KERNEL_OPTION')
vmlinuz = config.get('iso_installer', 'VMLINUZ')
consoles = config.get('iso_installer', 'CONSOLES')
serial = config.get('iso_installer', 'SERIAL')
opts_1 = config.get('iso_installer', 'OPTS_1')
sys_type_1 = config.get('iso_installer', 'SYS_TYPE_1')
sys_type_2 = config.get('iso_installer', 'SYS_TYPE_2')
sys_type_3 = config.get('iso_installer', 'SYS_TYPE_3')
opts_2 = config.get('iso_installer', 'OPTS_2')
sec_prof_1 = config.get('iso_installer', 'SEC_PROF_1')
sec_prof_2 = config.get('iso_installer', 'SEC_PROF_2')
initrd = config.get('iso_installer', 'INITRD')
cmd = False
serial = '{vmlinuz} {consoles} {ser} {opts}'.format(
vmlinuz=vmlinuz, consoles=consoles, ser=serial, opts=opts_1)
no_serial = '{vmlinuz} {consoles} {opts}'.format(
vmlinuz=vmlinuz, consoles=consoles, opts=opts_1)
if kernel_option == '0':
cmd = ('{serial} {sys_type} {opts} {sec_prof} {initrd}'.format(
serial=serial, sys_type=sys_type_1, opts=opts_2,
sec_prof=sec_prof_1, initrd=initrd))
elif kernel_option == 'S0':
cmd = ('{serial} {sys_type} {opts} {sec_prof} {initrd}'.format(
serial=serial, sys_type=sys_type_1, opts=opts_2,
sec_prof=sec_prof_2, initrd=initrd))
elif kernel_option == '1':
cmd = ('{no_serial} {sys_type} {opts} {sec_prof} {initrd}'.format(
no_serial=no_serial, sys_type=sys_type_1, opts=opts_2,
sec_prof=sec_prof_1, initrd=initrd))
elif kernel_option == 'S1':
cmd = ('{no_serial} {sys_type} {opts} {sec_prof} {initrd}'.format(
no_serial=no_serial, sys_type=sys_type_1, opts=opts_2,
sec_prof=sec_prof_2, initrd=initrd))
elif kernel_option == '2':
cmd = ('{serial} {sys_type} {opts} {sec_prof} {initrd}'.format(
serial=serial, sys_type=sys_type_2, opts=opts_2,
sec_prof=sec_prof_1, initrd=initrd))
elif kernel_option == 'S2':
cmd = ('{serial} {sys_type} {opts} {sec_prof} {initrd}'.format(
serial=serial, sys_type=sys_type_2, opts=opts_2,
sec_prof=sec_prof_2, initrd=initrd))
elif kernel_option == '3':
cmd = ('{no_serial} {sys_type} {opts} {sec_prof} {initrd}'.format(
no_serial=no_serial, sys_type=sys_type_2, opts=opts_2,
sec_prof=sec_prof_1, initrd=initrd))
elif kernel_option == 'S3':
cmd = ('{no_serial} {sys_type} {opts} {sec_prof} {initrd}'.format(
no_serial=no_serial, sys_type=sys_type_2, opts=opts_2,
sec_prof=sec_prof_2, initrd=initrd))
elif kernel_option == '4':
cmd = ('{serial} {sys_type} {opts} {sec_prof} {initrd}'.format(
serial=serial, sys_type=sys_type_3, opts=opts_2,
sec_prof=sec_prof_1, initrd=initrd))
elif kernel_option == 'S4':
cmd = ('{serial} {sys_type} {opts} {sec_prof} {initrd}'.format(
serial=serial, sys_type=sys_type_3, opts=opts_2,
sec_prof=sec_prof_2, initrd=initrd))
elif kernel_option == '5':
cmd = ('{no_serial} {sys_type} {opts} {sec_prof} {initrd}'.format(
no_serial=no_serial, sys_type=sys_type_3, opts=opts_2,
sec_prof=sec_prof_1, initrd=initrd))
elif kernel_option == 'S5':
cmd = ('{no_serial} {sys_type} {opts} {sec_prof} {initrd}'.format(
no_serial=no_serial, sys_type=sys_type_3, opts=opts_2,
sec_prof=sec_prof_2, initrd=initrd))
return cmd
def grub_checker(iso, mode, grub_option, grub_cmd):
"""Check a grub cmd boot line against the ones in the StarlingX ISO file
This function compares the grub cmd boot line built by the
get_cmd_boot_line function against a StarlingX ISO file in order to check
whether it is still valid.
Basically it checks that all the arguments from the ISO boot entry are
contained in the line built by the get_cmd_boot_line function.
:param iso: the iso to mount.
:param mode: the mode to check the grub cmd line, this can be vbios/uefi.
:param grub_option: the boot line to compare which could have the
following values:
- 0: Standard Controller Configuration > Serial Console >
Standard Security Boot Profile.
- S0: Standard Controller Configuration > Serial Console > Extended
Security Boot Profile
- 1: Standard Controller Configuration > Graphical Console >
Standard Security Boot Profile
- S1: Standard Controller Configuration > Graphical Console >
Extended Security Boot Profile
- 2: All-in-one Controller Configuration > Serial Console >
Standard Security Boot Profile
- S2: All-in-one Controller Configuration > Serial Console >
Extended Security Boot Profile
- 3: All-in-one Controller Configuration > Graphical Console >
Standard Security Boot Profile
- S3: All-in-one Controller Configuration > Graphical Console >
Extended Security Boot Profile
- 4: All-in-one (lowlatency) Controller Configuration >
Serial Console > Standard Security Boot Profile
- S4: All-in-one (lowlatency) Controller Configuration >
Serial Console > Extended Security Boot Profile
- 5: All-in-one (lowlatency) Controller Configuration >
Graphical Console > Standard Security Boot Profile
- S5: All-in-one (lowlatency) Controller Configuration >
Graphical Console > Extended Security Boot Profile
:param grub_cmd: the cmd line built from get_cmd_boot_line function
:return
- match: if the grub_cmd has all the elements from the iso
- mismatch: if the grub_cmd does not have all the elements from the iso
"""
allowed_grub_options = [
'0', 'S0', '1', 'S1', '2', 'S2', '3', 'S3', '4', 'S4', '5', 'S5']
if grub_option not in allowed_grub_options:
raise KeyError('grub boot number does not exist')
mount_point = '/tmp/cdrom'
if os.path.exists(mount_point) and os.path.ismount(mount_point):
bash.run_command('sudo umount -l {}'.format(mount_point),
raise_exception=True)
elif not os.path.exists(mount_point):
os.makedirs(mount_point)
# mounting the iso file
bash.run_command('sudo mount -o loop {} {}'.format(iso, mount_point),
raise_exception=True)
if mode == 'vbios':
grub = '{}/syslinux.cfg'.format(mount_point)
regex = '-e "label [0-9]" -e "label [A-Z][0-9]" -e append'
grub_extracted_lines = bash.run_command('grep {} {}'.format(
regex, grub))
grub_option_list = grub_extracted_lines[1].split('\n')
key_dict = []
values_dict = []
# Filling the lists
for line in grub_option_list:
current_line = line.strip()
if current_line.startswith('label'):
key_dict.append(current_line.replace('label ', ''))
elif current_line.startswith('append'):
values_dict.append(current_line)
# zipping the list in only one as a list of tuples
grub_list = zip(key_dict, values_dict)
grub_dict = dict()
# creating a dict with the grub entries
for key, value in grub_list:
grub_dict[key] = value
# comparing the grub boot line from the ISO with the one obtained from
# get_cmd_boot_line function
iso_boot_line = grub_dict[grub_option].split()
# removing console parameters from iso_boot_line (deleting from the end
# so the remaining indexes stay valid)
blacklist = [
i for i, word in enumerate(iso_boot_line)
if word.startswith('console')
]
for index in reversed(blacklist):
del iso_boot_line[index]
if set(grub_cmd.split()).issuperset(set(iso_boot_line)):
status = 'match'
else:
status = 'mismatch'
diff = [
element for element in iso_boot_line
if element not in grub_cmd.split()]
LOG.warning('missed params from cmd grub line')
for element in diff:
LOG.warning(element)
elif mode == 'uefi':
raise NotImplementedError
else:
raise IndexError('{}: not allowed'.format(mode))
# dismount the mount_point
bash.run_command('sudo umount -l {}'.format(mount_point),
raise_exception=True)
return status
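# Usage sketch; the ISO path below is a made-up example:
#     cmd = get_cmd_boot_line()
#     result = grub_checker('/tmp/stx.iso', 'vbios', '0', cmd)
#     # result is either 'match' or 'mismatch'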
def get_controllers_ip(env, config_file, config_type, lab_file):
"""Get IPs of the controllers from the specific stx configuration file
Args:
- config_file: The stx-configuration.ini file
- config_type: The type of configuration selected from the command
line.
Return:
- controller_data: Dictionary with the key name and the IP of the
controllers
"""
# Read Configurtion File
conf = yaml.safe_load(open(config_file))
cont_data = {}
# Get Controllers IP's
if config_type == 'simplex':
cont_data['IP_UNIT_0_ADDRESS'] = conf['external_oam_floating_address']
cont_data['IP_UNIT_1_ADDRESS'] = ''
else:
cont_data['IP_UNIT_0_ADDRESS'] = conf['external_oam_node_0_address']
cont_data['IP_UNIT_1_ADDRESS'] = conf['external_oam_node_1_address']
if env == 'baremetal':
# Get physical interfaces
conf_lab = yaml.safe_load(open(lab_file))
cont_data['OAM_IF'] = conf_lab['nodes']['controller-0']['oam_if']
cont_data['MGMT_IF'] = conf_lab['nodes']['controller-0']['mgmt_if']
return cont_data
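# Usage sketch; file names and addresses below are made-up examples:
#     controllers = get_controllers_ip(
#         'baremetal', 'stx-config.yml', 'duplex', 'lab_setup.yml')
#     # -> {'IP_UNIT_0_ADDRESS': '10.10.10.3',
#     #     'IP_UNIT_1_ADDRESS': '10.10.10.4',
#     #     'OAM_IF': 'enp0s3', 'MGMT_IF': 'enp0s8'}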


@ -0,0 +1,220 @@
"""Provides the capability to setup a StarlingX iso with specific
configuration"""
from imp import reload
import os
import getpass
import subprocess
import pexpect
import psutil
from Config import config
from Libraries import common
from Utils import logger
from Utils import network
# reloading config.ini
reload(config)
# Global variables
THIS_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_PATH = os.path.dirname(THIS_PATH)
CURRENT_USER = getpass.getuser()
PASSWORD = config.get('credentials', 'STX_DEPLOY_USER_PSWD')
PROMPT = '$'
# setup the logger
LOG_FILENAME = 'iso_setup.log'
LOG_PATH = config.get('general', 'LOG_PATH')
LOG = logger.setup_logging(
'iso_setup', log_file='{path}/{filename}'.format(
path=LOG_PATH, filename=LOG_FILENAME), console_log=False)
class Installer(object):
"""Install a StarlingX ISO though serial console"""
def __init__(self):
self.child = pexpect.spawn(config.get('iso_installer', 'VIRSH_CMD'))
self.child.logfile = open('{}/iso_setup_console.txt'.format(
LOG_PATH), 'wb')
@staticmethod
def open_xterm_console():
"""Open a xterm console to visualize logs from serial connection"""
suite_path = os.path.dirname(THIS_PATH)
terminal = 'xterm'
terminal_title = '"controller-0 boot console"'
geometry = '-0+0' # upper right hand corner
os.environ['DISPLAY'] = ':0'
command = 'python {suite}/Utils/watcher.py {log_path}'.format(
suite=suite_path, log_path=LOG_PATH)
try:
pid_list = subprocess.check_output(['pidof', terminal]).split()
# killing all xterm active sessions
for pid in pid_list:
_pid = psutil.Process(int(pid))
# terminate the process
_pid.terminate()
if _pid.is_running():
# if it is still alive, force it to terminate
_pid.kill()
except subprocess.CalledProcessError:
LOG.info('There is no process for: {}'.format(terminal))
os.system('{term} -geometry {geo} -T {title} -e {cmd} &'.format(
term=terminal, geo=geometry, title=terminal_title, cmd=command))
def boot_installer(self):
"""Interact with the installation process at boot time
The aim of this function is to send the appropriate arguments in order
to boot the ISO
"""
boot_timeout = int(config.get('iso_installer', 'BOOT_TIMEOUT'))
self.child.expect('Escape character')
LOG.info('connected to the VM (controller-0)')
# send an escape character
self.child.sendline('\x1b')
self.child.expect('boot:')
cmd_boot_line = common.get_cmd_boot_line()
self.child.sendline(cmd_boot_line)
LOG.info('kernel command line sent: {}'.format(cmd_boot_line))
# send an enter character
self.child.sendline('\r')
# setting a boot timeout
self.child.timeout = boot_timeout
self.child.expect('Loading vmlinuz')
LOG.info('Loading vmlinuz')
self.child.expect('Loading initrd.img')
LOG.info('Loading initrd.img')
self.child.expect('Starting installer, one moment...')
LOG.info('Starting installer ...')
self.child.expect('Performing post-installation setup tasks')
LOG.info('Performing post-installation setup tasks')
def first_login(self):
"""Change the password at first login"""
user_name = config.get('credentials', 'STX_DEPLOY_USER_NAME')
self.child.expect('localhost login:')
LOG.info('the system booted up correctly')
LOG.info('logging into the system')
self.child.sendline(user_name)
self.child.expect('Password:')
self.child.sendline(user_name)
LOG.info('setting a new password')
self.child.expect('UNIX password:')
self.child.sendline(user_name)
self.child.expect('New password:')
self.child.sendline(PASSWORD)
self.child.expect('Retype new password:')
self.child.sendline(PASSWORD)
self.child.expect('$')
LOG.info('the password was changed successfully')
def configure_temp_network(self):
"""Setup a temporal controller IP"""
controller_tmp_ip = config.get('iso_installer', 'CONTROLLER_TMP_IP')
controller_tmp_gateway = config.get(
'iso_installer', 'CONTROLLER_TMP_GATEWAY')
LOG.info('Configuring temporary network')
self.child.expect(PROMPT)
# -----------------------------
# getting OS network interfaces
timeout_before = self.child.timeout
self.child.timeout = 10
self.child.sendline('ls /sys/class/net')
cmd_stdout = []
try:
for stdout in self.child:
cmd_stdout.append(stdout.strip())
except pexpect.exceptions.TIMEOUT:
LOG.info('custom timeout reached')
network_interfaces = []
network_interfaces.extend(''.join(cmd_stdout[-1:]).split())
# returning to the original timeout value
self.child.timeout = timeout_before
controller_tmp_interface = network_interfaces[0]
# -----------------------------
self.child.sendline('sudo ip addr add {0}/24 dev {1}'.format(
controller_tmp_ip, controller_tmp_interface))
self.child.expect('Password:')
self.child.sendline(PASSWORD)
self.child.expect(PROMPT)
self.child.sendline('sudo ip link set {} up'.format(
controller_tmp_interface))
self.child.expect(PROMPT)
self.child.sendline('sudo ip route add default via {}'.format(
controller_tmp_gateway))
LOG.info('Network configured, testing ping')
self.child.sendline('ping -c 1 127.0.0.1')
self.child.expect('1 packets transmitted')
LOG.info('Ping successful')
# updating networks in the config.ini
configuration_file = os.path.join(
PROJECT_PATH, 'Config', 'config.ini')
configuration_type = config.get('general', 'CONFIGURATION_TYPE')
network.update_networks_config(
network_interfaces, configuration_file, configuration_type)
def config_controller(self, config_file):
"""Configure controller with provided configuration file
:param config_file: which is the configuration file for
config_controller
"""
config_controller_timeout = int(config.get(
'iso_installer', 'CONFIG_CONTROLLER_TIMEOUT'))
self.child.expect(PROMPT)
LOG.info('Applying configuration (this will take several minutes)')
self.child.sendline(
'sudo config_controller --force --config-file {}'
.format(config_file))
self.child.timeout = config_controller_timeout
self.child.expect('Configuration was applied')
LOG.info(self.child.before)
def finish_logging(self):
"""Stop logging and close log file"""
self.child.logfile.close()
LOG.info('Closing the log')
def install_iso():
"""Start the process of installing a StarlingX ISO"""
install_obj = Installer()
install_obj.open_xterm_console()
install_obj.boot_installer()
install_obj.first_login()
install_obj.configure_temp_network()
return install_obj
def config_controller(controller_connection, config_file):
"""Start controller configuration with specified configuration file
:param controller_connection: the connection established with the
controller
:param config_file: which is the configuration file for config_controller
"""
controller_connection.config_controller(config_file)
controller_connection.finish_logging()
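# Usage sketch; the config file path below is a made-up example:
#     controller = install_iso()
#     config_controller(controller, '/home/user/stx_config.ini')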


@ -0,0 +1,528 @@
"""Provides the capability to setup a StarlingX iso with specific
configuration"""
from ast import literal_eval
from imp import reload
import os
import re
from shutil import copyfile
from shutil import copytree
from shutil import rmtree
import sys
import threading
import pexpect
import yaml
from bash import bash
from Config import config
from Utils import logger
from Utils import network
from Utils.utils import isdir
# reloading config.ini
reload(config)
# Global variables
THIS_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_PATH = os.path.dirname(THIS_PATH)
PROMPT = '$'
# Setup the logger
LOG_FILENAME = 'iso_setup_baremetal.log'
LOG_PATH = config.get('general', 'LOG_PATH')
LOG = logger.setup_logging('iso_setup_baremetal', log_file='{path}/{filename}'
.format(path=LOG_PATH, filename=LOG_FILENAME),
console_log=False)
class PxeServer(object):
"""Handle PXE services and mount ISO for Installation"""
def __init__(self, iso_path):
self.iso_path = iso_path
self.iso_name = os.path.basename(self.iso_path).replace('.iso', '')
self.tftp_dir = '/var/lib/tftpboot/uefi'
def mount_iso(self):
"""Mounting ISO and grabbing shim, grub.efi and grub.cfg files"""
# Mounting ISO on /mnt and on http server
mount_point = '/mnt'
http_mnt_point = '/var/www/html/stx'
tmp_mnt_point = '/tmp'
if os.listdir(mount_point):
LOG.info('{} is busy, unmounting it'.format(mount_point))
umounting_attempts = 3
while umounting_attempts > 0:
umounting = bash('sudo umount -l {}'.format(mount_point))
if umounting.stderr and umounting_attempts > 1:
LOG.info('Failed to umount {}, retrying...'.format(
mount_point))
elif umounting.stderr and umounting_attempts == 1:
LOG.info('Max umounting attempts reached, leaving '
'installation')
sys.exit(1)
else:
break
umounting_attempts -= 1
bash('sudo mount {0} {1}'.format(self.iso_path, mount_point))
LOG.info('Mounting ISO on {}'.format(mount_point))
if isdir(os.path.join(http_mnt_point, self.iso_name)):
LOG.info('Folder {0}/{1} already exists in http server, deleting '
'it.'.format(http_mnt_point, self.iso_name))
rmtree(os.path.join(http_mnt_point, self.iso_name))
copytree(mount_point, os.path.join(http_mnt_point, self.iso_name))
if isdir(os.path.join(tmp_mnt_point, self.iso_name)):
LOG.info('Folder {0}/{1} already exists in the temporary directory, '
'deleting it.'.format(tmp_mnt_point, self.iso_name))
rmtree(os.path.join(tmp_mnt_point, self.iso_name))
# Changing from RPM to CPIO format
LOG.info('Uncompressing the necessary RPM packages')
copytree(os.path.join(http_mnt_point, self.iso_name, 'Packages'),
os.path.join(tmp_mnt_point, self.iso_name, 'Packages'))
grub2_regex = re.compile('grub2-efi-x64-[0-9]')
os.chdir(os.path.join(tmp_mnt_point, self.iso_name, 'Packages'))
for package in os.listdir(
os.path.join(tmp_mnt_point, self.iso_name, 'Packages')):
if 'shim' in package or grub2_regex.search(package):
LOG.info('Found grub/shim file, uncompressing it')
bash('rpm2cpio {} | cpio -dimv'.format(package))
# Copying shim, and grub files to tftpboot folder
# fixme: handle condition to make sure tftp_dir exists
if not os.path.isdir(self.tftp_dir):
os.makedirs(self.tftp_dir)
LOG.info('Copying grub and shim files to TFTP server')
for root, _, files in os.walk('/tmp/{}/Packages'.format(
self.iso_name)):
for package in files:
if any(boot_file in package for boot_file in ('shim.efi',
'grubx64.efi')):
copyfile(os.path.join(root, package),
os.path.join(self.tftp_dir, package))
if 'grub.cfg' in package:
copyfile(os.path.join(root, package),
os.path.join(self.tftp_dir, package))
copyfile(os.path.join(http_mnt_point, self.iso_name,
'EFI/BOOT/grub.cfg'),
os.path.join(self.tftp_dir, 'grub.cfg'))
# Copying vmlinuz and initrd
images_dir = os.path.join(self.tftp_dir, 'images')
if isdir(images_dir):
LOG.info('{} already exists, deleting directory.'.format(
images_dir))
rmtree(images_dir)
LOG.info('Copying vmlinuz and initrd files.')
copytree(os.path.join(mount_point, 'images/pxeboot'),
os.path.join(self.tftp_dir, 'images'))
@staticmethod
def check_pxe_services():
"""This function is intended to restart DHCP service
DHCP service needs to be restarted in order to grab the changes on the
dhcp config file"""
LOG.info('Checking PXE needed services')
services = ['dhcpd', 'tftp', 'httpd']
for service in services:
active_service = bash('sudo systemctl is-active {}'
.format(service))
if 'active' in active_service.stdout:
LOG.info('{} service is active'.format(service))
continue
else:
LOG.info('{} service is not active, restarting'
.format(service))
bash('sudo systemctl restart {}'.format(service))
def get_efi_boot_line(self, grub_dict):
"""Get linuxefi command and initrdefi command from grub_dict
Get the linuxefi and initrdefi commands from grub_dict according to the
configuration option specified when running runner.py
"""
configuration_type = config.get('general', 'CONFIGURATION_TYPE')
http_server_ip = config.get('baremetal', 'HTTP_SERVER')
LOG.info('config_type: {}'.format(configuration_type))
boot_lines = dict()
if configuration_type == 'simplex':
boot_lines = grub_dict['aio']['serial']['standard']
elif configuration_type == 'duplex':
boot_lines = grub_dict['aio']['serial']['standard']
elif configuration_type == 'multinode_controller_storage':
boot_lines = grub_dict['standard']['serial']['standard']
elif configuration_type == 'multinode_dedicated_storage':
boot_lines = grub_dict['standard']['serial']['standard']
prefix = 'uefi/images'
linuxefi_cmd = boot_lines['linuxefi']
linuxefi_http_cmd = list()
for parameter in linuxefi_cmd.split(' '):
if 'inst.ks' in parameter:
ks_file = parameter.split('/')[-1]
parameter = 'inst.ks=http://{server}/stx/{iso}/{ks_file}' \
.format(server=http_server_ip, iso=self.iso_name,
ks_file=ks_file)
linuxefi_http_cmd.append(parameter)
elif 'inst.stage2' in parameter:
parameter = 'inst.stage2=http://{server}/stx/{iso}' \
.format(server=http_server_ip, iso=self.iso_name)
linuxefi_http_cmd.append(parameter)
elif 'vmlinuz' in parameter:
parameter = '{prefix}{parameter}'.format(prefix=prefix,
parameter=parameter)
linuxefi_http_cmd.append(parameter)
else:
linuxefi_http_cmd.append(parameter)
inst_repo = 'inst.repo=http://{server}/stx/{iso}'\
.format(server=http_server_ip, iso=self.iso_name)
linuxefi_http_cmd.append(inst_repo)
boot_lines['linuxefi'] = ' '.join(linuxefi_http_cmd)
initrd_cmd = boot_lines['initrdefi']
initrd_prefix_cmd = list()
for parameter in initrd_cmd.split(' '):
if 'initrd.img' in parameter:
parameter = '{prefix}{parameter}'.format(prefix=prefix,
parameter=parameter)
initrd_prefix_cmd.append(parameter)
else:
initrd_prefix_cmd.append(parameter)
boot_lines['initrdefi'] = ' '.join(initrd_prefix_cmd)
return boot_lines
def handle_grub(self):
"""Pointing source files to http server on grub"""
installation_type = config.get('general', 'CONFIGURATION_TYPE')
grub_dict = analyze_grub(
os.path.join(self.tftp_dir, 'grub.cfg'))
grub_lines = self.get_efi_boot_line(grub_dict)
grub_entry = ("menuentry '{config}'{{\n{linuxefi}\n{initrdefi}\n}}"
.format(config=installation_type,
linuxefi=grub_lines['linuxefi'],
initrdefi=grub_lines['initrdefi']))
grub_timeout = 'timeout=5\n'
with open(os.path.join(self.tftp_dir, 'grub.cfg'), 'w') as grub_file:
grub_file.writelines(grub_timeout)
grub_file.write(grub_entry)
class Node(object):
"""Constructs a Node server that can be booted to pxe and also follow the
installation of STX system
"""
def __init__(self, node):
self.name = node['name']
self.personality = node['personality']
self.pxe_nic_mac = node['pxe_nic_mac']
self.bmc_ip = node['bmc_ip']
self.bmc_user = node['bmc_user']
self.bmc_pswd = node['bmc_pswd']
if self.name == 'controller-0':
self.installation_ip = node['installation_ip']
def boot_server_to_pxe(self):
"""Boot the installation target server using PXE server"""
LOG.info('Booting {} To PXE'.format(self.name))
LOG.info('Node {}: Setting PXE as first boot option'
.format(self.name))
set_pxe = bash('ipmitool -I lanplus -H {node_bmc_ip} '
'-U {node_bmc_user} -P {node_bmc_pswd} '
'chassis bootdev pxe'.format(
node_bmc_ip=self.bmc_ip,
node_bmc_user=self.bmc_user,
node_bmc_pswd=self.bmc_pswd))
if set_pxe.stderr:
LOG.info(set_pxe.stderr)
LOG.info('Node {}: Resetting target.'.format(self.name))
power_status = bash('ipmitool -I lanplus -H {node_bmc_ip} '
'-U {node_bmc_user} -P {node_bmc_pswd} '
'chassis power status'.format(
node_bmc_ip=self.bmc_ip,
node_bmc_user=self.bmc_user,
node_bmc_pswd=self.bmc_pswd))
if power_status.stderr:
LOG.info(set_pxe.stderr)
if "Chassis Power is on" in str(power_status):
power = bash('ipmitool -I lanplus -H {node_bmc_ip} '
'-U {node_bmc_user} -P {node_bmc_pswd} '
'chassis power reset'.format(
node_bmc_ip=self.bmc_ip,
node_bmc_user=self.bmc_user,
node_bmc_pswd=self.bmc_pswd))
else:
power = bash('ipmitool -I lanplus -H {node_bmc_ip} '
'-U {node_bmc_user} -P {node_bmc_pswd} '
'chassis power on'.format(
node_bmc_ip=self.bmc_ip,
node_bmc_user=self.bmc_user,
node_bmc_pswd=self.bmc_pswd))
if power.stderr:
LOG.info(power.stderr)
LOG.info('Node {}: Deactivating sol sessions.'.format(self.name))
kill_sol = bash('ipmitool -I lanplus -H {node_bmc_ip} '
'-U {node_bmc_user} -P {node_bmc_pswd} sol '
'deactivate'.format(node_bmc_ip=self.bmc_ip,
node_bmc_user=self.bmc_user,
node_bmc_pswd=self.bmc_pswd))
if kill_sol.stderr:
LOG.info(kill_sol.stderr)
def follow_node_installation(self):
"""This function is intended to follow nodes installation"""
user_name = config.get('credentials', 'STX_DEPLOY_USER_NAME')
password = config.get('credentials', 'STX_DEPLOY_USER_PSWD')
LOG.info('Node {}: Following node installation.'.format(self.name))
installation = pexpect.spawn(('ipmitool -I lanplus -H {node_bmc_ip} '
'-U {node_bmc_user} -P {node_bmc_pswd} '
'sol activate')
.format(node_bmc_ip=self.bmc_ip,
node_bmc_user=self.bmc_user,
node_bmc_pswd=self.bmc_pswd))
installation.logfile = open('{}/iso_setup_installation.txt'.format(
LOG_PATH), 'wb')
installation.timeout = int(config.get('iso_installer', 'BOOT_TIMEOUT'))
installation.expect('Start PXE over IPv4.')
LOG.info('Node {}: Trying to boot using PXE'.format(self.name))
installation.expect('Linux version')
LOG.info('Node {}: Loading Linux Kernel'.format(self.name))
installation.expect('Welcome to')
LOG.info('Node {}: CentOS has been loaded'.format(self.name))
installation.expect('Starting installer, one moment...')
LOG.info('Node {}: Starting installer ...'.format(self.name))
installation.expect('Performing post-installation setup tasks')
LOG.info('Node {}: Performing post-installation setup tasks'
.format(self.name))
installation.expect('login:')
LOG.info('Node {}: the system booted up correctly'.format(self.name))
LOG.info('Node {}: logging into the system'.format(self.name))
installation.sendline(user_name)
installation.expect('Password:')
installation.sendline(user_name)
LOG.info('Node {}: setting a new password'.format(self.name))
installation.expect('UNIX password:')
installation.sendline(user_name)
installation.expect('New password:')
installation.sendline(password)
installation.expect('Retype new password:')
installation.sendline(password)
installation.expect('$')
LOG.info('Node {}: the password was changed successfully'
.format(self.name))
installation.close()
LOG.info('Node {}: Closing SOL session after successful installation'
.format(self.name))
deactivate_sol = bash(('ipmitool -I lanplus -H {node_bmc_ip} '
'-U {node_bmc_user} -P {node_bmc_pswd} '
'sol deactivate')
.format(node_bmc_ip=self.bmc_ip,
node_bmc_user=self.bmc_user,
node_bmc_pswd=self.bmc_pswd))
if not deactivate_sol.stderr:
LOG.info('Node {}: SOL session closed successfully'
.format(self.name))
def analyze_grub(grub_cfg_file):
"""Get linuxefi command and initrdefi command from grub_dict
Get linuxefi command and initrdefi command from grub_dict according to
selected option in config file
"""
with open(grub_cfg_file, 'r') as grub:
lines = grub.readlines()
cmd_lines = list()
for line in lines:
if 'linuxefi' in line:
line = line.strip()
cmd_line = "'linuxefi': '{line}',".format(line=line)
cmd_lines.append(cmd_line)
elif 'initrdefi' in line:
line = line.strip()
cmd_line = "'initrdefi': '{line}'".format(line=line)
cmd_lines.append(cmd_line)
elif 'submenu' in line or 'menuentry' in line:
if re.search('--id=(.*) {', line):
menu_name = re.search('--id=(.*) {', line)
else:
menu_name = re.search("'(.*)'", line)
menu_name = menu_name.group(1)
line = "'{}': {{".format(menu_name)
cmd_lines.append(line)
elif '}' in line:
cmd_lines.append('},')
grub_menu = ''.join(cmd_lines) # type: str
grub_menu = '{{ {} }}'.format(grub_menu)
grub_dict = literal_eval(grub_menu)
return grub_dict
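# Input/output sketch; the grub.cfg fragment below is a made-up example:
#     submenu 'UEFI Standard Configuration' --id=standard {
#         menuentry 'Serial Console' --id=serial {
#             linuxefi /vmlinuz inst.ks=... other args ...
#             initrdefi /initrd.img
#         }
#     }
# would be parsed into something like:
#     {'standard': {'serial': {'linuxefi': 'linuxefi /vmlinuz ...',
#                              'initrdefi': 'initrdefi /initrd.img'}}}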
def mount_iso_on_pxe(iso):
""""Manage and enable PXE services"""
pxe_server = PxeServer(iso)
pxe_server.mount_iso()
pxe_server.handle_grub()
pxe_server.check_pxe_services()
def install_iso_master_controller():
"""Launch ISO installation on controller-0"""
nodes_file = os.path.join(os.environ['PYTHONPATH'], 'baremetal',
'baremetal_setup.yaml')
nodes = yaml.safe_load(open(nodes_file))
# Update config.ini with OAM and MGMT interfaces
network_interfaces = []
network_interfaces.insert(0, nodes['nodes']['controller-0']['oam_if'])
network_interfaces.insert(1, nodes['nodes']['controller-0']['mgmt_if'])
configuration_file = os.path.join(
PROJECT_PATH, 'Config', 'config.ini')
configuration_type = config.get('general', 'CONFIGURATION_TYPE')
network.update_networks_config(
network_interfaces, configuration_file, configuration_type)
# Installing STX on main controller
controller_0 = nodes['nodes']['controller-0']
master_controller = Node(controller_0)
master_controller.boot_server_to_pxe()
master_controller.follow_node_installation()
return master_controller
def get_controller0_ip():
"""Returns master controller IP"""
nodes_file = os.path.join(THIS_PATH, '..', 'BareMetal',
'installation_setup.yaml')
nodes = yaml.safe_load(open(nodes_file))
controller_0 = nodes['controller-0']
master_controller = Node(controller_0)
return master_controller.installation_ip
def config_controller(config_file):
"""Configures master controller using its corresponding init file"""
config_controller_timeout = int(config.get(
'iso_installer', 'CONFIG_CONTROLLER_TIMEOUT'))
nodes_file = os.path.join(os.environ['PYTHONPATH'], 'baremetal',
'baremetal_setup.yaml')
nodes = yaml.safe_load(open(nodes_file))
controller_0 = nodes['nodes']['controller-0']
master_controller = Node(controller_0)
serial_cmd = ('ipmitool -I lanplus -H {node_bmc_ip} -U {node_bmc_user} '
'-P {node_bmc_pswd} sol activate'
.format(node_bmc_ip=master_controller.bmc_ip,
node_bmc_user=master_controller.bmc_user,
node_bmc_pswd=master_controller.bmc_pswd))
configuring_controller = pexpect.spawn(serial_cmd)
configuring_controller.logfile = open('{}/iso_setup_installation.txt'
.format(LOG_PATH), 'wb')
configuring_controller.sendline('\r')
configuring_controller.expect(PROMPT)
LOG.info('Applying configuration (this will take several minutes)')
configuring_controller.sendline('sudo config_controller --force --config-file {}'
.format(config_file))
configuring_controller.timeout = config_controller_timeout
configuring_controller.expect('Configuration was applied')
LOG.info(configuring_controller.before)
configuring_controller.logfile.close()
LOG.info('Closing the log')
configuring_controller.close()
closing_serial_connection = (
bash('ipmitool -I lanplus -H {node_bmc_ip} -U {node_bmc_user} '
'-P {node_bmc_pswd} sol deactivate'
.format(node_bmc_ip=master_controller.bmc_ip,
node_bmc_user=master_controller.bmc_user,
node_bmc_pswd=master_controller.bmc_pswd)))
if closing_serial_connection.stderr:
LOG.info(closing_serial_connection.stderr)
def install_secondary_nodes():
"""Installs STX on controller-1 and computes"""
nodes_file = os.path.join(THIS_PATH, '..', 'BareMetal',
'installation_setup.yml')
nodes = yaml.safe_load(open(nodes_file))
# Removing controller-0 from Nodes
controller_0 = nodes.pop('controller-0')
master_controller = Node(controller_0)
serial_cmd = ('ipmitool -I lanplus -H {node_bmc_ip} -U {node_bmc_user} '
'-P {node_bmc_pswd} sol activate'
.format(node_bmc_ip=master_controller.bmc_ip,
node_bmc_user=master_controller.bmc_user,
node_bmc_pswd=master_controller.bmc_pswd))
controller_0_serial = pexpect.spawn(serial_cmd)
# Loading openrc
controller_0_serial.sendline('source /etc/nova/openrc')
# Adding nodes to master controller
nodes_instances = list()
node_names = nodes.keys()
for node_name in node_names:
node = Node(nodes[node_name])
nodes_instances.append(node)
controller_0_serial.sendline('system host-add -n {name} '
'-p {personality} -m {mac_address}'
.format(name=node.name,
personality=node.personality,
mac_address=node.pxe_nic_mac))
node.boot_server_to_pxe()
node_installation_threads = list()
for nodes_instance in nodes_instances:
thread = threading.Thread(
target=nodes_instance.follow_node_installation)
LOG.info('Starting installation on {}'.format(nodes_instance.name))
thread.start()
node_installation_threads.append(thread)
# Waiting for nodes to be installed
LOG.info('Waiting for nodes to be installed')
for node_installation_thread in node_installation_threads:
node_installation_thread.join()
LOG.info('All nodes have been installed successfully!')
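# Usage sketch; the ISO path and config file below are made-up examples:
#     mount_iso_on_pxe('/var/iso/stx.iso')
#     install_iso_master_controller()
#     config_controller('/home/user/stx_config.ini')
#     install_secondary_nodes()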


@ -0,0 +1,62 @@
*** Settings ***
Documentation Lock and Unlock compute and storage hosts. Swact a controller.
... Author(s):
... - Jose Perez Carranza <jose.perez.carranza@intel.com>
... - Juan Carlos Alonso <juan.carlos.alonso@intel.com>
Library SSHLibrary
Library Collections
Library OperatingSystem
Library Libraries/common.py
Library String
Variables Variables/Global.py
Variables Variables/config_init.py Config
... %{PYTHONPATH}/Config/config.ini
*** Keywords ***
Unlock Controller
[Arguments] ${controller_name}
[Documentation] Unlocks specified controller.
Wait Until Keyword Succeeds 15 min 10 sec Check Property Value
... ${controller_name} availability online
${result} Run Command system host-unlock ${controller_name} True
... 60
Wait Until Keyword Succeeds 20 min 5 sec Check Property Value
... ${controller_name} administrative unlocked
[Return] ${result}
Unlock Compute
[Arguments] ${compute}
[Documentation] Unlock specified compute.
Run Command system host-unlock ${compute} True 60 sec
Check Host Readiness ${compute}
Lock Node
[Documentation] Locks specified node.
[Arguments] ${controller_name}
Wait Until Keyword Succeeds 5 min 10 sec Check Property Value
... ${controller_name} availability available
${result} Run Command system host-lock ${controller_name} True
Wait Until Keyword Succeeds 5 min 10 sec Check Property Value
... ${controller_name} administrative locked
[Return] ${result}
Swact Controller
[Arguments] ${controller}
[Documentation] Swact the active controller and activate the SSH
... connection with the new active controller.
${result} Run Command system host-swact ${controller} True
${new_act_cont} Set Variable If
... '${controller}'=='controller-0' controller-1 controller-0
Wait Until Keyword Succeeds 10 min 2 sec Check Host Task
... ${controller} Swact: Complete
Check Host Readiness ${new_act_cont} 1
# - Switch SSH connection to the Active Controller
Switch Controller Connection ${secondary_controller_connection}
... ${master_controller_connection}
Unlock Storage
[Arguments] ${storage}
[Documentation] Unlock specified storage node.
Run Command system host-unlock ${storage} True 60 sec
Check Host Readiness ${storage}
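# Usage sketch; node names below are examples:
#     Lock Node    controller-1
#     Unlock Controller    controller-1
#     Swact Controller    controller-0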


@ -0,0 +1,93 @@
*** Settings ***
Documentation Checks the health of the PODs and kube-system services, and
... performs a helm override on the openstack application.
... Author(s):
... - Jose Perez Carranza <jose.perez.carranza@intel.com>
... - Juan Carlos Alonso <juan.carlos.alonso@intel.com>
Library SSHLibrary
Library Collections
Library OperatingSystem
Library Libraries/common.py
Library String
Variables Variables/Global.py
Variables Variables/config_init.py Config
... %{PYTHONPATH}/Config/config.ini
*** Keywords ***
Check PODs Health
[Documentation] Check all OpenStack pods are healthy
${kubectl_cmd} Set Variable kubectl get pods --all-namespaces -o wide
${cmd} Catenate SEPARATOR=| ${kubectl_cmd} grep -v NAMESPACE
... grep -v Running grep -v Completed
&{result} Run Command ${cmd}
${value} Get From Dictionary ${result} stdout
Should Be Empty ${value}
Helm Override OpenStack
[Arguments] ${app_name} ${char_name} ${namespace}
[Documentation] Helm override for OpenStack nova chart and reset.
${kubectl_cmd} Set Variable system helm-override-update
${cmd} Catenate ${kubectl_cmd} --set conf.nova.DEFAULT.foo=bar
... ${app_name} ${char_name} ${namespace}
Run Command ${cmd} True
Check Helm Override OpenStack
[Documentation] Check that nova.conf is updated in all nova-compute
... containers.
${kubectl_cmd} Set Variable kubectl get pods --all-namespaces -o wide
${cmd} Catenate SEPARATOR=| ${kubectl_cmd} grep nova-compute
... awk '{print $2}'
&{result} Run Command ${cmd}
@{nova_pod_list} Convert Response To List ${result}
${kubectl_cmd} Set Variable kubectl exec -n openstack -it
: FOR ${nova_pod} IN @{nova_pod_list}
\ ${cmd} Catenate ${kubectl_cmd} ${nova_pod}
... -- grep foo /etc/nova/nova.conf
\ &{result} Run Command ${cmd}
\ Should Contain ${result.stdout} foo = bar
Check Kube System Services
[Documentation] Check pods status and kube-system services are
... displayed.
${kubectl_cmd} Set Variable kubectl get services -n kube-system
${cmd} Catenate SEPARATOR=| ${kubectl_cmd} grep -v NAME
... awk '{print $1}'
&{result} Run Command ${cmd}
${kubeb_systems} Get From Dictionary ${result} stdout
Should Contain ${kubeb_systems} ingress
Should Contain ${kubeb_systems} ingress-error-pages
Should Contain ${kubeb_systems} ingress-exporter
Should Contain ${kubeb_systems} kube-dns
Should Contain ${kubeb_systems} tiller-deploy
&{result} Run Command kubectl get deployments.apps -n kube-system
${kubeb_systems} Get From Dictionary ${result} stdout
Should Contain ${kubeb_systems} calico-kube-controllers
Should Contain ${kubeb_systems} coredns
Should Contain ${kubeb_systems} ingress-error-pages
Should Contain ${kubeb_systems} rbd-provisioner
Should Contain ${kubeb_systems} tiller-deploy
Create POD
[Arguments] ${pod_yml} ${pod_name}
[Documentation] Create a POD.
&{result} Run Command kubectl create -f ${pod_yml}
${value} Get From Dictionary ${result} stdout
Should Be Equal As Strings ${value} pod/${pod_name} created
Delete POD
[Arguments] ${pod_name}
[Documentation] Delete a POD.
&{result} Run Command kubectl delete pods ${pod_name} timeout=60
${value} Get From Dictionary ${result} stdout
Should Be Equal As Strings ${value} pod "${pod_name}" deleted
Check POD
[Arguments] ${pod_name}
[Documentation] Check if a POD is running.
${kubectl_cmd} Set Variable kubectl get pods -n default
${cmd} Catenate SEPARATOR=| ${kubectl_cmd} grep ${pod_name}
... awk '{print $3}'
&{result} Run Command ${cmd}
${status} Get From Dictionary ${result} stdout
Should Be Equal As Strings ${status} Running
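# Usage sketch; the yaml file and pod name below are examples:
#     Create POD    pod-definition.yml    test-pod
#     Check POD    test-pod
#     Delete POD    test-pod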


@ -0,0 +1,477 @@
*** Settings ***
Documentation Establish an SSH connection with the master controller to
... execute openstack commands to create networks, subnetworks, flavors,
... images, volumes, snapshots, instances, etc.
... Author(s):
... - Jose Perez Carranza <jose.perez.carranza@intel.com>
... - Juan Carlos Alonso <juan.carlos.alonso@intel.com>
Library Collections
Library SSHLibrary
Library String
Resource Resources/Utils.robot
Variables Variables/Global.py
*** Keywords ***
Run OS Command
[Arguments] ${cmd} ${fail_if_error}=False ${timeout}=${TIMEOUT+20}
[Documentation] Keyword to execute commands exclusively for OpenStack, as
... it uses the proper token for OS authentication.
${load_os_token} Set Variable export OS_CLOUD=openstack_helm
${stdout} ${stderr} ${rc} Execute Command
... ${load_os_token} && ${cmd} return_stdout=True
... return_stderr=True return_rc=True timeout=${timeout}
${res} Create dictionary stdout=${stdout} stderr=${stderr}
... rc=${rc}
Run Keyword If ${rc} != 0 and ${fail_if_error} == True FAIL
... ${stderr}
[Return] ${res}
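# Usage sketch; the command and timeout below are examples:
#     Run OS Command    openstack network list    True    30 sec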
Create Network
[Arguments] ${network_name} ${additional_args}=${EMPTY}
... ${verbose}=TRUE
[Documentation] Create Network with openstack request.
${openstack_cmd} Set Variable openstack network create
${cmd} Catenate ${openstack_cmd} ${network_name}
... ${additional_args}
Run OS Command ${cmd} True 30 sec
Create Subnet
[Arguments] ${network_name} ${range_ip}
... ${additional_args}=${EMPTY}
[Documentation] Create a subnet for the network with an openstack request.
${openstack_cmd} Set Variable openstack subnet create
${cmd} Catenate ${openstack_cmd} --network ${network_name}
... --subnet-range ${range_ip} ${additional_args}
Run OS Command ${cmd} True 30 sec
Create Flavor
[Arguments] ${ram} ${vcpus} ${disk} ${name}
... ${extra_args}=${EMPTY}
[Documentation] Create a flavor with specified values.
${openstack_cmd} Set Variable openstack flavor create
${cmd} Catenate ${openstack_cmd} --ram ${ram} --disk ${disk}
... --vcpus ${vcpus} --public --id auto ${extra_args}
... ${name}
Run OS Command ${cmd} True 3 min
Create Image
[Arguments] ${file_path} ${disk_format} ${name}
[Documentation] Create image from a given .img file.
SSHLibrary.File Should Exist ${file_path}
${openstack_cmd} Set Variable openstack image create
${cmd} Catenate ${openstack_cmd} --file ${file_path}
... --disk-format ${disk_format} --public ${name}
Run OS Command ${cmd} True 3 min
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... image ${name} status active
Create Volume
[Arguments] ${size} ${image} ${bootable} ${name}
[Documentation] Create Volume.
${openstack_cmd} Set Variable openstack volume create
${cmd} Catenate ${openstack_cmd} --size ${size}
... --image ${image} ${bootable} ${name}
Run OS Command ${cmd} True 30 sec
Wait Until Keyword Succeeds 10 min 10 sec Check Field Value
... volume ${name} status available
Create Snapshot
[Arguments] ${volume} ${name}
[Documentation] Create Snapshot.
Run OS Command
... openstack volume snapshot create --volume ${volume} ${name}
... True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... volume snapshot ${name} status available
Create Stack
[Arguments] ${stack_name} ${stack_template} ${net_id}
[Documentation] Create Stack
${openstack_cmd} Set Variable openstack stack create --template
${cmd} Catenate ${openstack_cmd} ${stack_template}
... ${stack_name} --parameter "NetID=${net_id}"
${output} Run OS Command ${cmd}
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... stack ${stack_name} stack_status CREATE_COMPLETE
${openstack_cmd} Set Variable openstack server list
${cmd} Catenate SEPARATOR=| ${openstack_cmd} awk '{print$4}'
... grep -v "Name"
&{result} Run OS Command ${cmd} True 30 sec
@{vm_list} Convert Response To List ${result}
: FOR ${vm} IN @{vm_list}
\ Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm} status ACTIVE
\ Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm} power_state Running
Create Instance
[Arguments] ${net_name} ${vm_name} ${image} ${flavor}
[Documentation] Create a VM instance with the net id of the network,
... flavor and image
${net_id} Get Net Id ${net_name}
${openstack_cmd} Set Variable openstack server create
${cmd} Catenate ${openstack_cmd} --image ${image}
... --flavor ${flavor} --nic net-id=${net_id} ${vm_name}
Run OS Command ${cmd} True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status ACTIVE
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} power_state Running
Create Instance From Volume
[Arguments] ${net_name} ${vm_name} ${volume} ${flavor}
[Documentation] Create a VM instance with the net id of the network,
... flavor and volume
${net_id} Get Net Id ${net_name}
${openstack_cmd} Set Variable openstack server create
${cmd} Catenate ${openstack_cmd} --volume ${volume}
... --flavor ${flavor} --nic net-id=${net_id} ${vm_name}
Run OS Command ${cmd} True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status ACTIVE
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} power_state Running
Create Instance From Snapshot
[Arguments] ${net_name} ${image} ${vm_name} ${snapshot}
... ${size} ${flavor}
[Documentation] Create a VM instance with the net id of the network,
... flavor and snapshot
${net_id} Get Net Id ${net_name}
${snapshot_id} Get Snapshot ID ${snapshot}
${openstack_cmd} Set Variable openstack server create
${mapping} Catenate SEPARATOR=: ${snapshot_id} snapshot
... ${size}
${cmd} Catenate ${openstack_cmd} --flavor ${flavor}
... --image ${image} --nic net-id=${net_id}
... --block-device-mapping vdb=${mapping} ${vm_name}
Run OS Command ${cmd} True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status ACTIVE
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} power_state Running
Create Instance With Keypair
[Arguments] ${net_name} ${vm_name} ${image} ${flavor}
... ${key_name}
[Documentation] Create a VM instance with the net id of the
... network, flavor, image and a keypair
${net_id} Get Net Id ${net_name}
${openstack_cmd} Set Variable openstack server create
${cmd} Catenate ${openstack_cmd} --image ${image}
... --flavor ${flavor} --nic net-id=${net_id}
... --key-name ${key_name} ${vm_name}
Run OS Command ${cmd} True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status ACTIVE
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} power_state Running
Create KeyPair
[Arguments] ${key_name}
[Documentation] Create new public or private key for server ssh access.
${key_dir} Create Directory On Current Host .ssh
... /home/${CLI_USER_NAME}
${result} Run Keyword And Ignore Error
... SSHLibrary.File Should Exist ${key_dir}/${key_name}
${key_exist} Get From List ${result} 0
Run Keyword If '${key_exist}' == 'FAIL'
... Generate SSH Key On Current Host /home/${CLI_USER_NAME}/.ssh
... ${key_name}
${openstack_cmd} Set Variable openstack keypair create
${cmd} Catenate ${openstack_cmd}
... --public-key ${key_dir}/${key_name}.pub ${key_name}
Run OS Command ${cmd} True 30 sec
Suspend Instance
[Arguments] ${vm_name}
[Documentation] Suspend the corresponding VM
Run OS Command openstack server suspend ${vm_name} True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status SUSPENDED
Resume Instance
[Arguments] ${vm_name}
[Documentation] Resume the corresponding VM
Run OS Command openstack server resume ${vm_name} True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status ACTIVE
Set Error State Instance
[Arguments] ${vm_name} ${value}
[Documentation] Set 'Error' value to the corresponding VM
Run OS Command openstack server set --state ${value} ${vm_name}
... True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status ERROR
Set Active State Instance
[Arguments] ${vm_name} ${value}
[Documentation] Set 'Active' value to the corresponding VM
Run OS Command openstack server set --state ${value} ${vm_name}
... True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status ACTIVE
Pause Instance
[Arguments] ${vm_name}
[Documentation] Pause an instance.
Run OS Command openstack server pause ${vm_name} True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status PAUSED
Unpause Instance
[Arguments] ${vm_name}
[Documentation] Unpause an instance.
Run OS Command openstack server unpause ${vm_name} True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status ACTIVE
Stop Instance
[Arguments] ${vm_name}
[Documentation] Stop an instance.
Run OS Command openstack server stop ${vm_name} True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status SHUTOFF
Start Instance
[Arguments] ${vm_name}
[Documentation] Start an instance.
Run OS Command openstack server start ${vm_name} True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status ACTIVE
Lock Instance
[Arguments] ${vm_name}
[Documentation] Lock an instance.
Run OS Command openstack server lock ${vm_name} True 30 sec
Unlock Instance
[Arguments] ${vm_name}
[Documentation] Unlock an instance.
Run OS Command openstack server unlock ${vm_name} True 30 sec
Reboot Instance
[Arguments] ${vm_name}
[Documentation] Reboot an instance.
Run OS Command openstack server reboot ${vm_name} True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status REBOOT
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status ACTIVE
Rebuild Instance
[Arguments] ${vm_name}
[Documentation] Rebuild an instance.
Run OS Command openstack server rebuild ${vm_name} True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status REBUILD
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status ACTIVE
Rebuild Instance From Volume
[Arguments] ${vm_name} ${image}
[Documentation] Rebuild an instance from volume
Run OS Command openstack server rebuild --image ${image} ${vm_name}
... True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status REBUILD
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status ACTIVE
Resize Instance
[Arguments] ${vm_name} ${flavor}
[Documentation] Resize an instance.
Run OS Command openstack server resize --flavor ${flavor} ${vm_name}
... True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status RESIZE
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status VERIFY_RESIZE
Run OS Command openstack server resize --confirm ${vm_name}
... True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec Check Field Value
... server ${vm_name} status ACTIVE
Set Instance Property
[Arguments] ${vm_name} ${key}
[Documentation] Set properties of an instance.
Run OS Command openstack server set ${key} ${vm_name} True
... 30 sec
Unset Instance Property
[Arguments] ${vm_name} ${key}
[Documentation] Unset properties of an instance.
Run OS Command openstack server unset ${key} ${vm_name} True
... 30 sec
Evacuate Instances
[Arguments] ${host}
[Documentation] Evacuate all VMs from computes or from controllers.
${openstack_cmd} Set Variable openstack compute service set
${cmd} Catenate ${openstack_cmd} --disable
... --disable-reason test-evacuate ${host} nova-compute
Run OS Command ${cmd} True 30 sec
Wait Until Keyword Succeeds 5 min 10 sec
... Check Compute Service Property ${host} disabled
Wait Until Keyword Succeeds 5 min 10 sec
... Check Compute Service Property ${host} enabled
Delete Stack
[Arguments] ${stack}
[Documentation] Delete a specific stack.
${openstack_cmd} Set Variable openstack stack delete
${cmd} Catenate ${openstack_cmd} ${stack} -y
Run OS Command ${cmd} True 30 sec
Delete Snapshot
[Arguments] ${snapshot}
[Documentation] Delete a specific snapshot.
${openstack_cmd} Set Variable openstack volume snapshot delete
${cmd} Catenate ${openstack_cmd} ${snapshot}
Run OS Command ${cmd} True 30 sec
Delete Volume
[Arguments] ${volume}
[Documentation] Delete a specific volume.
${openstack_cmd} Set Variable openstack volume delete
${cmd} Catenate ${openstack_cmd} ${volume}
Run OS Command ${cmd} True 30 sec
Delete Flavor
[Arguments] ${flavor}
[Documentation] Delete a specific flavor.
${openstack_cmd} Set Variable openstack flavor delete
${cmd} Catenate ${openstack_cmd} ${flavor}
Run OS Command ${cmd} True 30 sec
Delete Image
[Arguments] ${image}
[Documentation] Delete a specific image.
${openstack_cmd} Set Variable openstack image delete
${cmd} Catenate ${openstack_cmd} ${image}
Run OS Command ${cmd} True 30 sec
Delete Instance
[Arguments] ${vm}
[Documentation] Delete a specific instance.
${openstack_cmd} Set Variable openstack server delete
${cmd} Catenate ${openstack_cmd} ${vm}
Run OS Command ${cmd} True 30 sec
Delete Network
[Arguments] ${network}
[Documentation] Delete a specific network.
${openstack_cmd} Set Variable openstack network delete
${cmd} Catenate ${openstack_cmd} ${network}
Run OS Command ${cmd} True 30 sec
Delete KeyPair
[Arguments] ${keypair}
[Documentation] Delete a specific keypair.
${openstack_cmd} Set Variable openstack keypair delete
${cmd} Catenate ${openstack_cmd} ${keypair}
Run OS Command ${cmd} True 30 sec
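# The "Delete All ..." keywords below share the same pattern: list the
# resources with the openstack CLI, extract the relevant column with awk,
# filter out the header (and any protected entries) with grep -v, and then
# delete each remaining item in a FOR loop.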
Delete All Stacks
[Documentation] Get a list of all existing stacks to delete them one
... by one.
${openstack_cmd} Set Variable openstack stack list
${cmd} Catenate SEPARATOR=| ${openstack_cmd} awk '{print$4}'
... grep -v "Stack"
&{result} Run OS Command ${cmd} True 30 sec
@{stack_list} Convert Response To List ${result}
: FOR ${stack} IN @{stack_list}
\ Delete Stack ${stack}
Delete All Snapshots
[Documentation] Get a list of all existing snapshots to delete them one
... by one.
${openstack_cmd} Set Variable openstack volume snapshot list
${cmd} Catenate SEPARATOR=| ${openstack_cmd} awk '{print$4}'
... grep -v "Name"
&{result} Run OS Command ${cmd} True 30 sec
@{snapshot_list} Convert Response To List ${result}
: FOR ${snapshot} IN @{snapshot_list}
\ Delete Snapshot ${snapshot}
Delete All Volumes
[Documentation] Get a list of all existing volumes to delete them one
... by one.
${openstack_cmd} Set Variable openstack volume list
${cmd} Catenate SEPARATOR=| ${openstack_cmd} awk '{print$2}'
... grep -v "ID"
&{result} Run OS Command ${cmd} True 30 sec
@{volume_list} Convert Response To List ${result}
: FOR ${volume} IN @{volume_list}
\ Delete Volume ${volume}
Delete All Flavors
[Documentation] Get a list of all existing flavors to delete them one
... by one.
${openstack_cmd} Set Variable openstack flavor list
${cmd} Catenate SEPARATOR=| ${openstack_cmd} awk '{print$4}'
... grep -v "Name" grep -v "m1"
&{result} Run OS Command ${cmd} True
@{flavor_list} Convert Response To List ${result}
: FOR ${flavor} IN @{flavor_list}
\ Delete Flavor ${flavor}
Delete All Images
[Documentation] Get a list of all existing images to delete them one
... by one.
${openstack_cmd} Set Variable openstack image list
${cmd} Catenate SEPARATOR=| ${openstack_cmd} awk '{print$4}'
... grep -v "Name" grep -v "Cirros"
&{result} Run OS Command ${cmd} True
@{image_list} Convert Response To List ${result}
: FOR ${image} IN @{image_list}
\ Delete Image ${image}
Delete All Instances
[Documentation] Get a list of all existing instances to delete them one
... by one.
${openstack_cmd} Set Variable openstack server list
${cmd} Catenate SEPARATOR=| ${openstack_cmd} awk '{print$4}'
... grep -v "Name"
&{result} Run OS Command ${cmd} True
@{vm_list} Convert Response To List ${result}
: FOR ${vm} IN @{vm_list}
\ Delete Instance ${vm}
Delete All Networks
[Documentation] Get a list of all existing networks to delete them one
... by one.
${openstack_cmd} Set Variable openstack network list
${cmd} Catenate SEPARATOR=| ${openstack_cmd} awk '{print$4}'
... grep -v "Name" grep -v "private" grep -v "public"
... grep -v "external" grep -v "internal"
&{result} Run OS Command ${cmd} True
@{network_list} Convert Response To List ${result}
: FOR ${network} IN @{network_list}
\ Delete Network ${network}
Delete All KeyPairs
[Documentation] Get a list of all existing keypairs to delete them one
... by one.
${openstack_cmd} Set Variable openstack keypair list
${cmd} Catenate SEPARATOR=| ${openstack_cmd} awk '{print$2}'
... grep -v "Name"
&{result} Run OS Command ${cmd} True
@{key_list} Convert Response To List ${result}
: FOR ${key} IN @{key_list}
\ Delete KeyPair ${key}
Openstack Cleanup All
[Documentation] Delete all instances, images, flavors, networks and
... keypairs generated during tests.
Delete All Instances
Delete All Images
Delete All Flavors
Delete All Networks
Delete All KeyPairs
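# Volumes are deleted twice: a first best-effort pass with errors ignored
# (some volumes may still be in use by snapshots), and a second pass once
# the snapshots have been removed.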
Run Keyword And Ignore Error Delete All Volumes
Delete All Snapshots
Delete All Volumes
Delete All Stacks


@ -0,0 +1,598 @@
*** Settings ***
Documentation Library with keywords to be used during provisioning of
... StarlingX deployment.
... Author(s):
... - Jose Perez Carranza <jose.perez.carranza@intel.com>
... - Juan Carlos Alonso <juan.carlos.alonso@intel.com>
Variables Variables/Global.py
Variables Variables/config_init.py Config
... %{PYTHONPATH}/Config/config.ini
Variables ${ENV_YAML}
Resource Resources/Utils.robot
Resource Resources/OpenStack.robot
Resource Resources/HostManagement.robot
Library Libraries/common.py
Library Collections
Library String
*** Keywords ***
Set NTP Server
[Documentation] Setup the NTP servers for the system.
${ntp_servers} Set Variable 0.pool.ntp.org,1.pool.ntp.org
Run Command system ntp-modify ntpservers=${ntp_servers}
Configure Data Interfaces
[Arguments] ${host} ${data0if} ${data1if} ${physnet0}
... ${physnet1} ${mtu}
[Documentation] Configure data interfaces with proper values.
# - Configuring OAM Network and Cluster Host Interface for controller-1
Run Keyword IF '${host}'=='controller-1' Run Keywords
... Provide OAM Network Interface ${host}
... AND
... Setup Cluster Host Interfaces ${host}
${iface_info} Get Interface Information ${host} ${data0if}
${data0portuuid} Get From List ${iface_info} 0
${data0portname} Get From List ${iface_info} 1
${data0pciaddr} Get From List ${iface_info} 2
${iface_info} Get Interface Information ${host} ${data1if}
${data1portuuid} Get From List ${iface_info} 0
${data1portname} Get From List ${iface_info} 1
${data1pciaddr} Get From List ${iface_info} 2
${data0ifuuid} Get Interface UUID ${host} ${data0portname}
${data1ifuuid} Get Interface UUID ${host} ${data1portname}
# - Configure the datanetworks in sysinv, prior to referencing it in
# - the 'system host-if-modify' command
Run Keyword If '${host}'=='controller-0' or '${host}'=='compute-0'
... Run Keywords
... Run Command system datanetwork-add ${physnet0} vlan True
... AND
... Run Command system datanetwork-add ${physnet1} vlan True
Add Interface To Data Network ${mtu} data0 ${physnet0}
... ${host} ${data0ifuuid}
Add Interface To Data Network ${mtu} data1 ${physnet1}
... ${host} ${data1ifuuid}
Enable Containerized Services
[Arguments] ${host}
[Documentation] Apply all the node labels for the controller
... and compute functions.
${is_controller} Evaluate "controller" in """${host}"""
Run Keyword If ${is_controller}==True Run Command
... system host-label-assign ${host} openstack-control-plane=enabled
... True
Run Keyword If
... '${CONFIGURATION_TYPE}'=='Simplex' or '${CONFIGURATION_TYPE}'=='Duplex' or ${is_controller}==False
... Run Keywords Run Command
... system host-label-assign ${host} openstack-compute-node=enabled
... True
... AND
... Run Command system host-label-assign ${host} openvswitch=enabled
... True
... AND
... Run Command system host-label-assign ${host} sriov=enabled
... True
Setup Partitions
[Arguments] ${host} ${lgv_name} ${nova_size} ${cgts_part_size}
... ${setup_cgts}=False
[Documentation] Setup required partition on specified host.
${root_disk_device} Get Root Disk Device ${host}
${root_disk_uuid} Get Disk List UID ${host} ${root_disk_device}
${part_status} Set Variable If
... '${host}'=='controller-0' Ready Creating
${is_controller} Evaluate "controller" in """${host}"""
# --- Configuring nova-local ---
${nova_size} Run Keyword If ${nova_size}==True
... Calculate Nova Partition Size For Computes ${host}
... ${root_disk_uuid} ${cgts_part_size}
... ELSE
... Set Variable ${nova_size}
${nova_partition_uuid} Add Disk Partition ${host}
... ${root_disk_uuid} ${nova_size} ${part_status}
Add Local Volume Group ${host} ${lgv_name}
Add Physical Volume ${host} ${lgv_name} ${nova_partition_uuid}
Run Keyword If ${is_controller}==False Modify LVG Attributes
... -b image ${host} ${lgv_name}
# --- Extending cgts-vg ---
${cgts_partition_uuid} Run Keyword If ${is_controller}==True or ${setup_cgts}==True
... Add Disk Partition ${host}
... ${root_disk_uuid} ${cgts_part_size} ${part_status}
Run Keyword If ${is_controller}==True or ${setup_cgts}==True
... Add Physical Volume ${host} ${lgv_name}
... ${cgts_partition_uuid}
Configure Ceph
[Arguments] ${host} ${backend_type}
[Documentation] Enable CEPH partition on the specified node
Add OSD To Tier ${host}
Unlock Master Controller
[Arguments] ${controller}
[Documentation] Verify that controller with ACTIVE ssh connection
... is unlocked and reestablish the ssh connection on the suite.
${error_expected} Set Variable *Socket is closed*
Unlock Controller ${controller}
Wait Until Keyword Fails 20 min 20 sec ${error_expected}
... Run Command whoami
Close Connection
Wait Until Keyword Succeeds 15 min 30 sec
... Open Master Controller Connection
Wait Until Keyword Succeeds 15 min 30 sec
... Check Controller Is Unlocked ${controller}
Check Host Readiness ${controller}
Unlock Second Controller
[Arguments] ${controller}
[Documentation] Verify second controller is unlocked.
Unlock Controller ${controller}
Wait Until Keyword Succeeds 40 min 30 sec
... Check Controller Is Unlocked ${controller}
# - Generate a new secondary connection due to loss of communication
Wait Until Keyword Succeeds 50 min 20 sec Check Property Value
... ${controller} availability available
Generate Secondary Controller Connection ${controller}
Set Ceph Pool Replication
[Documentation] Set Ceph pool replication to get HEALTH_OK
Run Command ceph osd pool ls | xargs -i ceph osd pool set {} size 1
... True
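# With a replication size of 1 the pools do not require extra replicas,
# which avoids a HEALTH_WARN on deployments with a single OSD available.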
Check Ceph Status
[Arguments] ${status_1}=HEALTH_OK ${status_2}=HEALTH_WARN
[Documentation] Verifies the status of the CEPH feature.
${result} Run Command ceph -s
Should Contain Any ${result.stdout} ${status_1} ${status_2}
Run Keyword If '${CONFIGURATION_TYPE}'!='Simplex'
... Run Command ceph osd tree True
Bring Up Services
[Arguments] ${application}
[Documentation] Use sysinv to apply the application.
System Application Apply ${application}
Wait Until Keyword Succeeds 60 min 5 sec
... Check System Application Status ${application} applied
Set Cluster Endpoints
[Arguments] ${clouds_yml}
[Documentation] Set and verify the cluster endpoints.
Execute Sudo Command mkdir -p /etc/openstack
Execute Sudo Command mv ${clouds_yml} /etc/openstack/.
Provider Network Setup
[Arguments] ${physnet0} ${physnet1}
[Documentation] Create the network segment ranges.
&{output} Run OS Command
... openstack project list | grep admin | awk '{print $2}' True
${adminid} Get From Dictionary ${output} stdout
${openstack_cmd} Set Variable openstack network segment range create
${cmd} Catenate ${openstack_cmd} ${physnet0}-a
... --network-type vlan --physical-network ${physnet0}
... --minimum 400 --maximum 499 --private --project ${adminid}
Run OS Command ${cmd} True
${cmd} Catenate ${openstack_cmd} ${physnet0}-b
... --network-type vlan --physical-network ${physnet0}
... --minimum 10 --maximum 10
Run OS Command ${cmd} True
${cmd} Catenate ${openstack_cmd} ${physnet1}-a
... --network-type vlan --physical-network ${physnet1}
... --minimum 500 --maximum 599 --private --project ${adminid}
Run OS Command ${cmd} True
Tenant Networking Setup
[Arguments] ${physnet0} ${physnet1} ${externalnet} ${publicnet}
... ${privatenet} ${internalnet} ${publicsubnet}
... ${privatesubnet} ${internalsubnet} ${externalsubnet}
... ${publicrouter} ${privaterouter}
[Documentation] Setup tenant networking
&{output} Run OS Command
... openstack project list | grep admin | awk '{print $2}' True
${adminid} Get From Dictionary ${output} stdout
${openstack_cmd} Set Variable openstack network create
${cmd} Catenate ${openstack_cmd} --project ${adminid}
... --provider-network-type=vlan
... --provider-physical-network=${physnet0} --provider-segment=10
... --share --external ${externalnet}
Run OS Command ${cmd} True
${cmd} Catenate ${openstack_cmd} --project ${adminid}
... --provider-network-type=vlan
... --provider-physical-network=${physnet0} --provider-segment=400
... ${publicnet}
Run OS Command ${cmd} True
${cmd} Catenate ${openstack_cmd} --project ${adminid}
... --provider-network-type=vlan
... --provider-physical-network=${physnet1} --provider-segment=500
... ${privatenet}
Run OS Command ${cmd} True
${cmd} Catenate ${openstack_cmd} --project ${adminid}
... ${internalnet}
Run OS Command ${cmd} True
&{output} Run OS Command
... openstack network list | grep ${publicnet} | awk '{print $2}'
${publicnetid} Get From Dictionary ${output} stdout
&{output} Run OS Command
... openstack network list | grep ${privatenet} | awk '{print $2}'
${privatenetid} Get From Dictionary ${output} stdout
&{output} Run OS Command
... openstack network list | grep ${internalnet} | awk '{print $2}'
${internalnetid} Get From Dictionary ${output} stdout
&{output} Run OS Command
... openstack network list | grep ${externalnet} | awk '{print $2}'
${externalnetid} Get From Dictionary ${output} stdout
${openstack_cmd} Set Variable openstack subnet create
${cmd} Catenate ${openstack_cmd} --project ${adminid}
... ${publicsubnet} --network ${publicnet}
... --subnet-range 192.168.101.0/24
Run OS Command ${cmd} True
${cmd} Catenate ${openstack_cmd} --project ${adminid}
... ${privatesubnet} --network ${privatenet}
... --subnet-range 192.168.201.0/24
Run OS Command ${cmd} True
${cmd} Catenate ${openstack_cmd} --project ${adminid}
... ${internalsubnet} --gateway none --network ${internalnet}
... --subnet-range 10.1.1.0/24
Run OS Command ${cmd} True
${cmd} Catenate ${openstack_cmd} --project ${adminid}
... ${externalsubnet} --gateway 192.168.1.1 --no-dhcp
... --network ${externalnet} --subnet-range 192.168.1.0/24
... --ip-version 4
Run OS Command ${cmd} True
Run OS Command openstack router create ${publicrouter} True
Run OS Command openstack router create ${privaterouter} True
&{output} Run OS Command
... openstack router list | grep ${privaterouter} | awk '{print $2}'
${privaterouterid} Get From Dictionary ${output} stdout
&{output} Run OS Command
... openstack router list | grep ${publicrouter} | awk '{print $2}'
${publicrouterid} Get From Dictionary ${output} stdout
${cmd} Catenate openstack router set ${publicrouterid}
... --external-gateway ${externalnetid} --disable-snat
Run OS Command ${cmd} True
${cmd} Catenate openstack router set ${privaterouterid}
... --external-gateway ${externalnetid} --disable-snat
Run OS Command ${cmd} True
Run OS Command
... openstack router add subnet ${publicrouter} ${publicsubnet}
... True
Run OS Command
... openstack router add subnet ${privaterouter} ${privatesubnet}
... True
Install Remaining Nodes Virtual
[Documentation] Install the rest of the nodes according to the selected
... configuration.
Start Nodes Virtual
Second Controller Installation Virtual ${second_controller}
${vm_computes} Get Compute List To Install Virtual
${vm_storages} Get Storage List To Install Virtual
Run Keyword If '${CONFIGURATION_TYPE}'!='Duplex' Run Keywords
... Install Compute Nodes Virtual ${vm_computes}
... AND
... Run Keyword If '${CONFIGURATION_TYPE}'=='MN-External'
... Install Storage Nodes Virtual ${vm_storages}
Second Controller Installation Virtual
[Arguments] ${controller}
[Documentation] Validate that the second controller is installed correctly
... in virtual environments.
${mac_address} Get Qemu VM MAC Address ${controller} stxbr2
# Workaround for launchpad 1822657
Run Command
... system host-add -n ${controller} -p controller -m ${mac_address}
${listed} Wait Until Keyword Succeeds 2 min 5 sec
... Run Command system host-show ${controller}
# Try To Add it again, if not possible fail
Run Keyword If ${listed.rc}!=0 Run Command
... system host-add -n ${controller} -p controller -m ${mac_address}
... True
Wait Until Keyword Succeeds 50 min 20 sec Check Property Value
... ${second_controller} install_state completed
Get Compute List To Install Virtual
[Documentation] Get a list of computes that will be installed, in this
... case the names of the VMs created on the host machine.
${cmd} Catenate virsh -c qemu:///system list --all |
... awk '/compute/{print $2}'
${result} Run ${cmd}
@{computes} Split String ${result}
[Return] @{computes}
Get Storage List To Install Virtual
[Documentation] Get a list of storage nodes that will be installed, in
... this case the names of the VMs created on the host machine.
${cmd} Catenate virsh -c qemu:///system list --all |
... awk '/storage/{print $2}'
${result} Run ${cmd}
@{storages} Split String ${result}
[Return] @{storages}
Install Compute Nodes Virtual
[Arguments] ${computes}
[Documentation] Install the compute nodes of the system with the given
... computes list.
Set Test Variable ${counter} ${0}
: FOR ${vm} IN @{computes}
\ ${mac_address} Get Qemu VM MAC Address ${vm} stxbr2
\ Run Command
... system host-add -n compute-${counter} -p worker -m ${mac_address}
\ ${listed} Wait Until Keyword Succeeds 2 min 5 sec
... Run Command system host-show compute-${counter}
\ Run Keyword If ${listed.rc}!=0 Run Command
... system host-add -n compute-${counter} -p worker -m ${mac_address}
... True
\ ${counter} Set Variable ${counter+1}
# The reason for a second loop is to add all the computes in the first loop
# and verify their installation in the second loop, hence a lot of time is
# saved on the test.
Set Test Variable ${counter} ${0}
: FOR ${vm} IN @{computes}
\ Wait Until Keyword Succeeds 20 min 20 sec Check Property Value
... compute-${counter} install_state completed
\ ${counter} Set Variable ${counter+1}
Install Storage Nodes Virtual
[Arguments] ${storages}
[Documentation] Install the storage nodes of the system with the given
... storage nodes list.
Set Test Variable ${counter} ${0}
: FOR ${vm} IN @{storages}
\ ${mac_address} Get Qemu VM MAC Address ${vm} stxbr2
\ Run Command
... system host-add -n storage-${counter} -p storage -m ${mac_address}
\ ${listed} Wait Until Keyword Succeeds 2 min 5 sec
... Run Command system host-show storage-${counter}
\ Run Keyword If ${listed.rc}!=0 Run Command
... system host-add -n storage-${counter} -p storage -m ${mac_address}
... True
\ ${counter} Set Variable ${counter+1}
# The reason for a second loop is to add all the storage nodes in the first
# loop and verify their installation in the second loop, hence a lot of
# time is saved on the test.
Set Test Variable ${counter} ${0}
: FOR ${vm} IN @{storages}
\ Wait Until Keyword Succeeds 20 min 20 sec Check Property Value
... storage-${counter} install_state completed
\ ${counter} Set Variable ${counter+1}
Install Remaining Nodes Baremetal
[Documentation] Install the rest of the nodes according to the information
... in the installation yaml file.
@{nodes_list} Get List Of Installation Nodes
# -- Turn On Nodes
: FOR ${node} IN @{nodes_list}
\ &{node_data} Set Variable &{NODES}[${node}]
\ ${bmc_ip} Set Variable &{node_data}[bmc_ip]
\ ${bmc_user} Set Variable &{node_data}[bmc_user]
\ ${pswd} Set Variable &{node_data}[bmc_pswd]
\ ${set_pxe_boot_device} Catenate ipmitool -H ${bmc_ip}
... -U ${bmc_user} -P ${pswd}
... -I lanplus chassis bootparam set bootflag force_pxe
\ ${turn_on_node} Catenate ipmitool -H ${bmc_ip} -U ${bmc_user}
... -P ${pswd} -I lanplus chassis power on
\ Run ${set_pxe_boot_device}
\ Run ${turn_on_node}
# -- Start installation of nodes
: FOR ${node} IN @{nodes_list}
\ &{node_data} Set Variable &{NODES}[${node}]
\ ${name} Set Variable &{node_data}[name]
\ ${personality} Set Variable &{node_data}[personality]
\ ${mac_address} Set Variable &{node_data}[pxe_nic_mac]
\ Run Command
... system host-add -n ${name} -p ${personality} -m ${mac_address}
\ ${listed} Wait Until Keyword Succeeds 2 min 5 sec
... Run Command system host-show ${name}
\ Run Keyword If ${listed.rc}!=0 Run Command
... system host-add -n ${name} -p ${personality} -m ${mac_address}
... True
# -- Monitor installation of nodes
: FOR ${node} IN @{nodes_list}
\ &{node_data} Set Variable &{NODES}[${node}]
\ ${name} Set Variable &{node_data}[name]
\ Wait Until Keyword Succeeds 20 min 5 sec Check Property Value
\ ... ${name} install_state completed
Get List Of Installation Nodes
[Documentation] Return a list of nodes that are candidates to be installed;
... controller-0 is removed by default.
# NODES variable is actually the YAML file imported on variables
${nodes_list} Get Dictionary Keys ${NODES}
Remove Values From List ${nodes_list} controller-0
[Return] @{nodes_list}
Assign Data Interfaces
[Documentation] Set variables for the data interfaces according to the
... selected configuration.
@{data_interfaces} Run Keyword IF '${ENVIRONMENT}'=='virtual'
... Create List eth1000 eth1001
... ELSE
... Create List enp24s0f0 enp24s0f1
${data0if} Get From List ${data_interfaces} 0
${data1if} Get From List ${data_interfaces} 1
Set Suite Variable ${data0if}
Set Suite Variable ${data1if}
Turn Off Installation Nodes
[Documentation] Turn off all the nodes that are candidates to be
... installed, to prevent them from booting into an already installed
... StarlingX deployment.
@{nodes_list} Get List Of Installation Nodes
: FOR ${node} IN @{nodes_list}
\ &{node_data} Set Variable &{NODES}[${node}]
\ ${bmc_ip} Set Variable &{node_data}[bmc_ip]
\ ${bmc_user} Set Variable &{node_data}[bmc_user]
\ ${pswd} Set Variable &{node_data}[bmc_pswd]
\ ${turn_off_node} Catenate ipmitool -H ${bmc_ip} -U ${bmc_user}
... -P ${pswd} -I lanplus chassis power off
\ Run ${turn_off_node}
Check Host Readiness
[Arguments] ${host} ${wait_time}=5
[Documentation] Verify that host is unlocked, enabled and available.
Wait Until Keyword Succeeds 40 min 5 sec Check Property Value
... ${host} administrative unlocked
Wait Until Keyword Succeeds 20 min 5 sec Check Property Value
... ${host} operational enabled
# Validate that the host does not fall into degraded mode after being
# available for some time.
Run Keyword And Ignore Error Check If Host Is In Degraded Mode
... ${host} ${wait_time}
Wait Until Keyword Succeeds 60 min 5 sec Check Property Value
... ${host} availability available
Provide OAM Network Interface
[Arguments] ${controller}
[Documentation] Enables the OAM interface for the second controller.
${net_type} Set Variable oam
${class} Set Variable platform
${oam_if} Set Variable ${Config.logical_interface.OAM}
Modify Host Interface ${net_type} ${class} ${controller}
... ${oam_if}
Configure OAM Interface
[Arguments] ${controller}
[Documentation] Enables the OAM interface for the master controller.
${oam_if} Set Variable ${Config.logical_interface.OAM}
Run Keyword If '${CONFIGURATION_TYPE}'!='Simplex'
... Remove LO Interfaces ${controller}
${system_cmd} Catenate system host-if-modify ${controller}
... ${oam_if} -c platform
Run Command ${system_cmd}
Run Command system interface-network-assign ${controller} ${oam_if} oam
Remove LO Interfaces
[Arguments] ${controller}
[Documentation] Remove LO interfaces.
Run Command system host-if-modify ${controller} lo -c none
${system_cmd} Catenate SEPARATOR=|
... system interface-network-list ${controller} grep lo
... awk '{print $4}'
${result} Run Command ${system_cmd}
${ifnet_uuids} Convert Response To List ${result}
: FOR ${uuid} IN @{ifnet_uuids}
\ Run Command system interface-network-remove ${uuid}
Configure MGMT Interface
[Arguments] ${controller}
[Documentation] Enables the MGMT interface for the master controller.
${mgmt_if} Set Variable ${Config.logical_interface.MGMT}
${system_cmd} Catenate system host-if-modify ${controller}
... ${mgmt_if} -c platform
Run Command ${system_cmd}
${system_cmd} Catenate system host-if-modify ${controller}
... ${mgmt_if} -c platform
Run Command ${system_cmd}
Run Command
... system interface-network-assign ${controller} ${mgmt_if} mgmt
Run Command
... system interface-network-assign ${controller} ${mgmt_if} cluster-host
Setup Cluster Host Interfaces
[Arguments] ${host}
[Documentation] Setup mgmt network as a cluster-host network interface.
Run Command system host-if-modify ${host} mgmt0 True
Run Command system interface-network-assign ${host} mgmt0 cluster-host
Add Ceph Monitor
[Arguments] ${host}
[Documentation] Enable CEPH monitor to the specified host.
Run Command system ceph-mon-add ${host} True
#${mon_uid} Get Ceph Monitor UID ${host}
Wait Until Keyword Succeeds 30 min 10 sec
... Check Property Value Of Command
... system ceph-mon-show ${host} state configured
Add OSD To Tier
[Arguments] ${host}
[Documentation] Enable the OSD on the specified node.
${device} Set Variable /dev/sdb
${tier_name} Set Variable ceph_cluster
${tier_opt} Set Variable ${SPACE}
${cmd} Catenate SEPARATOR=| system host-disk-list ${host}
... grep ${device} awk '{print $2}'
${result} Run Command ${cmd} True
${tier_uuid} Run Keyword If '${host}'=='controller-1'
... Get Tier UUID ${tier_name}
${tier_opt} Set Variable If
... '${host}'=='controller-1' --tier-uuid ${tier_uuid} ${EMPTY}
Run Command
... system host-stor-add ${host} ${result.stdout.strip()} ${tier_opt}
... True
Setup Cluster Host Interfaces Storage Node
[Arguments] ${host}
[Documentation] Setup mgmt network as a cluster-host network interface
... on storage nodes.
${if_uuid} Get Interface UUID ${host} mgmt0
Run Command
... system interface-network-assign ${host} ${if_uuid} cluster-host
... True
Add Storage OSD
[Arguments] ${storage} ${device}
[Documentation] Enables the storage nodes as Object Storage Device (OSD)
${tier_name} Set Variable ceph_cluster
${uid} Get Disk List UID ${storage} ${device}
Run Command system host-stor-add ${storage} ${uid} True 60
Label Remote Storage
[Arguments] ${host}
[Documentation] Enable remote storage for root/ephemeral/swap disks in
... standard storage configurations by labeling the worker nodes.
Run Command system host-label-assign ${host} remote-storage=enabled
... True
Add Interface To Data Network
[Arguments] ${mtu} ${if_name} ${datanetwork} ${host} ${uuid}
[Documentation] Adds an interface to the specified data network.
${option} Set Variable If
... '${host}'=='controller-0' -d -p
${cmd} Catenate system host-if-modify -m ${mtu} -n ${if_name}
... -c data ${host} ${uuid}
Run Command ${cmd} True
Run Command
... system interface-datanetwork-assign ${host} ${uuid} ${datanetwork}
... True
Calculate Nova Partition Size For Computes
[Arguments] ${host} ${uid} ${cgs_size}
[Documentation] Return a calculated value for nova according to the
... available space.
${disk_space} Get Property Value Of Command
... system host-disk-show ${host} ${uid} available_gib
${disk_space} Fetch From Left ${disk_space} .
${nova_space} Evaluate ${disk_space}-${cgs_size}
[Return] ${nova_space}
Add Disk Partition
[Arguments] ${host} ${uid} ${size} ${status}
[Documentation] Add a partition for the specified disk on the specified host.
${result} Run Command
... system host-disk-partition-add ${host} ${uid} ${size} -t lvm_phys_vol
... True
${new_uid} Get Property From Result ${result} uuid
Wait Until Keyword Succeeds 30 min 10 sec
... Check Property Value Of Command
... system host-disk-partition-show ${host} ${new_uid} status
... ${status}
[Return] ${new_uid}
Add Local Volume Group
[Arguments] ${host} ${lvg_name}
[Documentation] Adds a local volume group according to given options.
Run Command system host-lvg-add ${host} ${lvg_name} True
Add Physical Volume
[Arguments] ${host} ${lvg_name} ${uid}
[Documentation] Adds a physical volume to the specified host.
Run Command system host-pv-add ${host} ${lvg_name} ${uid} True
Modify LVG Attributes
[Arguments] ${options} ${host} ${lvg_name}
[Documentation] Modify the attributes of a Local Volume Group.
Run Command system host-lvg-modify ${options} ${host} ${lvg_name}
Configure Vswitch Type
[Documentation] Deploy OVS-DPDK, which is supported only on baremetal hardware.
Run Command system modify --vswitch_type ovs-dpdk
Run Command system host-cpu-modify -f vswitch -p0 1 controller-0
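# The host-cpu-modify call above assigns one core on processor 0 of
# controller-0 to the vswitch function, as required by OVS-DPDK.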


@ -0,0 +1,79 @@
*** Settings ***
Documentation This file contains Keywords to execute Stress test.
... Author(s):
... - Jose Perez Carranza <jose.perez.carranza@intel.com>
... - Juan Carlos Alonso <juan.carlos.alonso@intel.com>
Library Collections
Library SSHLibrary
Library String
Resource Resources/Utils.robot
Variables Variables/Global.py
*** Keywords ***
Stress Suspend Resume Instance
[Arguments] ${vm}
[Documentation] Perform a suspend/resume of a VM 10 times.
: FOR ${i} IN RANGE 1 11
\ Suspend Instance ${vm}
\ Resume Instance ${vm}
Stress Set Error Active Instance
[Arguments] ${vm}
[Documentation] Set 'Error' and 'Active' flags to VM 10 times.
: FOR ${i} IN RANGE 1 11
\ Set Error State Instance ${vm} error
\ Set Active State Instance ${vm} active
Stress Pause Unpause Instance
[Arguments] ${vm}
[Documentation] Perform pause/unpause to a VM 10 times.
: FOR ${i} IN RANGE 1 11
\ Pause Instance ${vm}
\ Unpause Instance ${vm}
Stress Stop Start Instance
[Arguments] ${vm}
[Documentation] Perform stop/start to a VM 10 times.
: FOR ${i} IN RANGE 1 11
\ Stop Instance ${vm}
\ Start Instance ${vm}
Stress Lock Unlock Instance
[Arguments] ${vm}
[Documentation] Perform lock/unlock to a VM 10 times.
: FOR ${i} IN RANGE 1 11
\ Lock Instance ${vm}
\ Unlock Instance ${vm}
Stress Reboot Instance
[Arguments] ${vm}
[Documentation] Perform a reboot of a VM 10 times.
: FOR ${i} IN RANGE 1 11
\ Reboot Instance ${vm}
Stress Rebuild Instance
[Arguments] ${vm}
[Documentation] Perform a rebuild to a VM 10 times.
: FOR ${i} IN RANGE 1 11
\ Rebuild Instance ${vm}
Stress Rebuild Instance From Volume
[Arguments] ${vm} ${image}
[Documentation] Perform a rebuild from a volume to a VM 10 times.
: FOR ${i} IN RANGE 1 11
\ Rebuild Instance From Volume ${vm} ${image}
Stress Resize Instance
[Arguments] ${vm} ${flavor_1} ${flavor_2}
[Documentation] Perform a resize to a VM 10 times.
: FOR ${i} IN RANGE 1 11
\ Resize Instance ${vm} ${flavor_2}
\ Resize Instance ${vm} ${flavor_1}
Stress Set Property Instance
[Arguments] ${vm} ${properties_1} ${properties_2}
[Documentation] Set/unset properties to a VM 10 times.
: FOR ${i} IN RANGE 1 11
\ Set Instance Property ${vm} ${properties_1}
\ Unset Instance Property ${vm} ${properties_2}


@ -0,0 +1,533 @@
*** Settings ***
Documentation General Utils library. This library has broad scope, it can
... be used by any robot system tests.
...
... This program and the accompanying materials are made available under the
... terms of the Eclipse Public License v1.0 which accompanies this distribution,
... and is available at http://www.eclipse.org/legal/epl-v10.html
...
... Contributor(s):
... - Jose Perez Carranza <jose.perez.carranza@intel.com>
... - Juan Carlos Alonso <juan.carlos.alonso@intel.com>
Library SSHLibrary
Library Collections
Library OperatingSystem
Library Libraries/common.py
Library String
Variables Variables/Global.py
Variables Variables/config_init.py Config
... %{PYTHONPATH}/Config/config.ini
*** Keywords ***
Set Env Vars From Openrc
[Documentation] Sources the openrc from /etc/platform/ to have the admin
... variables exported on the controller.
Run Command source /etc/platform/openrc True
Run Command On Remote System
[Arguments] ${system_ip} ${cmd} ${user} ${password}
... ${prompt} ${prompt_timeout}=30 ${return_stdout}=True
... ${return_stderr}=False
[Documentation] Reduces the common work of running a command on a remote
... system to a single higher level robot keyword, taking care to log in
... with a public key or a password. The command given is written and the return
... value depends on the passed argument values of return_stdout
... (default: True) and return_stderr (default: False).
... At least one should be True, or the keyword will exit and FAIL.
... If both are True, the resulting return value will be a two element
... list containing both. Otherwise the resulting return value is a
... string.
... No test conditions are checked.
Run Keyword If "${return_stdout}"!="True" and "${return_stderr}"!="True"
... Fail At least one of {return_stdout} or {return_stderr} args
... should be set to True
${current_ssh_connection} SSHLibrary.Get Connection
BuiltIn.Log
... Attempting to execute command "${cmd}" on remote system
... "${system_ip}" by user "${user}" with prompt "${prompt}" and
... password "${password}"
${conn_id} SSHLibrary.Open Connection ${system_ip}
... prompt=${prompt} timeout=${prompt_timeout}
Flexible SSH Login ${user} ${password}
${stdout} ${stderr} SSHLibrary.Execute Command ${cmd}
... return_stderr=True
SSHLibrary.Close Connection
Log ${stderr}
Run Keyword If "${return_stdout}"!="True" Return From Keyword
... ${stderr}
Run Keyword If "${return_stderr}"!="True" Return From Keyword
... ${stdout}
[Return] ${stdout} ${stderr}
Run Command
[Arguments] ${cmd} ${fail_if_error}=False ${timeout}=${TIMEOUT+170}
... ${prompt}=$
[Documentation] Execute a command on the controller over the ssh connection,
... keeping the environment visible to the subsequent keywords. Also allows
... the keyword to fail if there is an error; by default this keyword
... will not fail and will return the stderr.
Set Client Configuration timeout=${timeout} prompt=${prompt}
&{result} Create Empty Result Dictionary
Read
Write ${cmd}
${output} Read Until Prompt
${output} Remove Prompt Line ${output}
${rc} Get Return Code
Run Keyword If ${rc} == 0 Set To Dictionary ${result} stdout=${output.strip()}
... ELSE IF ${fail_if_error} == True FAIL ${output}
... ELSE Set To Dictionary ${result} stderr=${output}
Set To Dictionary ${result} rc=${rc}
Log Dictionary ${result}
[Return] ${result}
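# Illustrative usage:
#    &{result} Run Command system host-list True
#    Log ${result.stdout}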
Execute Sudo Command
[Arguments] ${cmd} ${timeout}=${TIMEOUT+50}
[Documentation] Execute a sudo command on the controller over the ssh
... connection, keeping the environment visible to the subsequent keywords;
... a password is requested every time it is run. It is recommended to run
... sudo commands manually using the -k option (sudo -k) to find out if a
... password is required. If a password is not required after verifying the
... command manually, please use Run Command.
Set Client Configuration timeout=${timeout} prompt=:
Read
Write sudo -k ${cmd}
${output} Read Until Prompt
${output} Run Command ${CLI_USER_PASS}
[Return] ${output}
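# Illustrative usage: Execute Sudo Command mkdir -p /etc/openstack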
Stx Suite Setup
[Documentation] Wrapper to set up the environment needed to exercise
... StarlingX features.
Open Master Controller Connection
Set Env Vars From Openrc
Stx Suite TearDown
[Documentation] Wrapper to clean up activities on the suite.
Close All Connections
Get Qemu VM MAC Address
[Arguments] ${qemu_vm_name} ${source}
[Documentation] Returns the MAC address of a specific source from a
... qemu VM.
${qemu_cmd} Catenate SEPARATOR=|
... virsh -c qemu:///system domiflist ${qemu_vm_name} grep ${source}
... awk '{print $5}'
${mac_address} Run ${qemu_cmd}
[Return] ${mac_address}
Get Return Code
[Documentation] Wrapper to return the return code of the last executed
... command.
Write echo $?
${rc} Read Until Regexp [0-9]+
Log ${rc}
[Return] ${rc}
Get Compute Nodes
[Documentation] Get the compute nodes and return them in a list.
${system_cmd} Catenate SEPARATOR=| system host-list --nowrap
... grep compute- cut -d '|' -f 3
&{result} Run Command ${system_cmd} True
@{list} Convert Response To List ${result}
[Return] @{list}
Get Storage Nodes
[Documentation] Get the storage nodes and return them in a list.
${system_cmd} Catenate SEPARATOR=| system host-list --nowrap
... grep storage- cut -d '|' -f 3
&{result} Run Command ${system_cmd} True
@{list} Convert Response To List ${result}
[Return] @{list}
Get Compute Interfaces Names
[Arguments] ${host} ${pattern}=[A-Z]
[Documentation] Get all the interface names matching the given pattern; if
... no pattern is given all the interfaces are retrieved in the list.
${system_cmd} Catenate SEPARATOR=|
... system host-if-list --nowrap -a ${host} grep ${pattern}
... cut -d '|' -f 3
&{result} Run Command ${system_cmd} True
@{list} Convert Response To List ${result}
[Return] @{list}
Get Disk List UID
[Arguments] ${host} ${device_node}
[Documentation] Returns the UID of the disk given the device node and
... host
${system_cmd} Catenate SEPARATOR=| system host-disk-list ${host}
... grep ${device_node} awk '{print $2}'
&{result} Run Command ${system_cmd} True
${uid} Get From Dictionary ${result} stdout
[Return] ${uid}
Get Partition UID
[Arguments] ${host} ${device_node}
[Documentation] Returns the UID of the partition given the device node
... and host
${system_cmd} Catenate SEPARATOR=|
... system host-disk-partition-list ${host} grep ${device_node}
... awk '{print $2}'
&{result} Run Command ${system_cmd} True
${uid} Get From Dictionary ${result} stdout
[Return] ${uid}
Get Property Value Of Command
[Arguments] ${cmd} ${property}
[Documentation] Given a command that returns a table, this keyword
... returns the value of the specified property.
${result} Run Command ${cmd} | grep -w ${property} | awk '{print$4}'
${value} Get From Dictionary ${result} stdout
[Return] ${value.strip()}
Get LVM Storage Backend UID
[Arguments] ${backend}
[Documentation] Returns the UID of the specified Backend.
${result} Run Command
... system storage-backend-list | grep ${backend} | awk '{print $2}'
${value} Get From Dictionary ${result} stdout
[Return] ${value.strip()}
Get Property From Result
[Arguments] ${result} ${property}
[Documentation] Return the value of the given property from a command result.
${dict} String To Dict ${result}
${dict} Get From Dictionary ${dict} Property
${dict} Get From Dictionary ${dict} ${property}
${dict} Get From Dictionary ${dict} Value
[Return] ${dict}
Get Release Version
[Documentation] Returns the version of the release under validation.
${cmd_current_version} Catenate cat /etc/build.info |
... grep SW_VERSION | awk '{ split($1, v, "="); print v[2]}'
&{result} Run Command ${cmd_current_version} True
${current_version} Get From Dictionary ${result} stdout
[Return] ${current_version.strip('"')}
Get Tier UUID
[Arguments] ${tier_name}
[Documentation] Returns the TIER uuid
${name} Set Variable storage
${cmd} Catenate system storage-tier-list ${tier_name}
... |grep ${name} |awk '{print $2}'
${uuid} Run Command ${cmd}
[Return] ${uuid.stdout.strip()}
Get All Vms List
[Documentation] Get a list of all the VMs created by the suite.
${cmd} Catenate virsh -c qemu:///system list --all |
... awk '/-[0-9]/{print $2}'
${result} Run ${cmd}
@{vms} Split String ${result}
[Return] @{vms}
Get Root Disk Device
[Arguments] ${host}
[Documentation] Get the root disk device assigned to the specified
... node.
${cmd} Catenate SEPARATOR=| system host-show ${host}
... grep rootfs awk '{print $4}'
${result} Run Command ${cmd}
${root_disk} Set Variable ${result.stdout.strip()}
${cmd} Catenate SEPARATOR=|
... system host-disk-list ${host} --nowrap
... grep ${root_disk} awk '{print $4}'
${root_disk_device} Run Command ${cmd}
[Return] ${root_disk_device.stdout.strip()}
Get Interface UUID
[Arguments] ${host} ${port_name}
[Documentation] Get Interface id of the specified host and port
${cmd} Catenate SEPARATOR=| system host-if-list -a ${host}
... grep ${port_name} awk '{print $2}'
${uuid} Run Command ${cmd}
[Return] ${uuid.stdout.strip()}
Get Interface Information
[Arguments] ${host} ${interface}
[Documentation] Returns a list with the uuid, name and pci address of the
... specified interface.
${cmd} Catenate SEPARATOR=| system host-port-list ${host} --nowrap
... grep ${interface} awk '{ print $2,$4,$8}'
${info} Run Command ${cmd}
${info} Convert Response To List ${info}
[Return] ${info}
Get Ceph Monitor UID
[Arguments] ${host}
[Documentation] Get id of the CEPH monitor of the specified node.
${result} Run Command
... system ceph-mon-list | grep ${host} | awk '{print $2}'
${value} Get From Dictionary ${result} stdout
[Return] ${value.strip()}
Get Net Id
[Arguments] ${network_name}
[Documentation] Retrieve the net id for the given network name
${openstack_cmd} Set Variable openstack network list
${cmd} Catenate SEPARATOR=| ${openstack_cmd}
... grep "${network_name}" awk '{print$2}'
&{result} Run OS Command ${cmd}
${output} Get From Dictionary ${result} stdout
${splitted_output} Split String ${output} ${EMPTY}
${net_id} Get from List ${splitted_output} 0
[Return] ${net_id}
Get Snapshot ID
[Arguments] ${snapshot}
[Documentation] Retrieve the snapshot id for the given snapshot name
${openstack_cmd} Set Variable openstack volume snapshot list
${cmd} Catenate SEPARATOR=| ${openstack_cmd}
... grep "${snapshot}" awk '{print$2}'
&{result} Run OS Command ${cmd}
${output} Get From Dictionary ${result} stdout
${splitted_output} Split String ${output} ${EMPTY}
${snapshot_id} Get from List ${splitted_output} 0
[Return] ${snapshot_id}
Check Property Value
[Arguments] ${host} ${property} ${expected_value}
[Documentation] Validates that property is set correctly to the expected
... value
${current_value} Retrieve Host Property ${host} ${property}
Should Be Equal As Strings ${current_value} ${expected_value}
Check Controller Is Unlocked
[Arguments] ${controller_name}
[Documentation] Validates that controller is successfully unlocked.
Set Env Vars From Openrc
Check Property Value ${controller_name} administrative unlocked
Check Property Value Of Command
[Arguments] ${cmd} ${property} ${expected_value}
[Documentation] Validates that the property is set correctly to the expected
... value in the response of the given command.
${current_value} Get Property Value Of Command ${cmd} ${property}
Should Be Equal As Strings ${current_value} ${expected_value}
Check If Host Is In Degraded Mode
[Arguments] ${host} ${timeout}
[Documentation] Verify if the host falls into degraded mode during a period
... of the specified time.
Wait Until Keyword Succeeds ${timeout} min 10 sec
... Check Property Value ${host} availability degraded
Check Host Task
[Arguments] ${host} ${expected_result}
[Documentation] Get the actual task status from the given host.
${output} Run Command
... system host-show ${host} | grep task | awk -F '|' '{print$3}'
Should Contain ${output.stdout} ${expected_result}
Check System Application Status
[Arguments] ${application} ${status}
[Documentation] Check if openstack applications were applied.
${cmd} Catenate SEPARATOR=| system application-list
... grep ${application} awk '{print $10}'
&{result} Run Command ${cmd}
${value} Get From Dictionary ${result} stdout
Run Keyword If '${value}' == 'apply-failed' System Application Apply ${application}
... ELSE Should Be Equal As Strings ${value} ${status}
Check Field Value
[Arguments] ${component} ${component_name} ${property}
... ${expected_value}
[Documentation] Validates that property is set correctly to the
... expected value.
${current_value} Retrieve Field Property ${component}
... ${component_name} ${property}
Should Be Equal As Strings ${current_value} ${expected_value}
Check Compute Service Property
[Arguments] ${compute} ${expected_value}
[Documentation] Check the status of the nova-compute service on the given host.
${current_value} Retrieve Field Property Compute ${compute}
Should Be Equal As Strings ${current_value} ${expected_value}
Retrieve Field Property
[Arguments] ${component} ${component_name} ${property}
[Documentation] Returns the value of the specified property.
${openstack_cmd} Set Variable
... openstack ${component} show ${component_name}
${cmd}= Catenate SEPARATOR=| ${openstack_cmd}
... grep -w ${property} tail -1 awk '{print$4}'
${result} Run OS Command ${cmd}
${value} Get From Dictionary ${result} stdout
[Return] ${value.strip()}
Retrieve Field Property Compute
[Arguments] ${compute}
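[Documentation] Return the status of the nova-compute service on the
... given compute host.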
${openstack_cmd} Set Variable
... openstack compute service list --service nova-compute
${cmd} Catenate SEPARATOR=| ${openstack_cmd}
... grep ${compute} awk '{print$10}'
${result} Run OS Command ${cmd}
${value} Get From Dictionary ${result} stdout
[Return] ${value.strip()}
Retrieve Host Property
[Arguments] ${hostname} ${property}
[Documentation] Returns the value of the specified property of the
... system host.
${system_cmd} Catenate SEPARATOR=| system host-show ${hostname}
... grep -w ${property} awk '{print$4}'
${result} Run Command ${system_cmd}
${value} Get From Dictionary ${result} stdout
[Return] ${value.strip()}
Modify Host Interface
[Arguments] ${net_type} ${class} ${host} ${interface}
[Documentation] Modify interface attributes according to given options.
${system_cmd} Catenate system host-if-modify
... -n oam0 -c ${class} ${host} ${interface}
Run Command ${system_cmd} True
Run Command
... system interface-network-assign ${host} oam0 ${net_type}
Stage Application Deployment
[Arguments] ${application} ${app_tarball}
[Documentation] Use sysinv to upload the application tarball.
Wait Until Keyword Succeeds 30 min 5 sec
... Check System Application Status platform-integ-apps applied
Run Command system application-upload ${app_tarball} True
Wait Until Keyword Succeeds 30 min 5 sec
... Check System Application Status ${application} uploaded
System Application Apply
[Arguments] ${application}
[Documentation] Run the system application apply command.
Run Command system application-apply ${application} True
Wait Until Keyword Succeeds 90 min 5 min
... Check System Application Status ${application} applied
System Application Remove
[Arguments] ${application}
[Documentation] Run the system application remove command.
Run Command system application-remove ${application} True
Wait Until Keyword Succeeds 90 min 5 sec
... Check System Application Status ${application} uploaded
System Application Delete
[Arguments] ${application}
[Documentation] Run the system application delete command.
Run Command system application-delete ${application} True
${cmd} Catenate SEPARATOR=| system application-list
... grep ${application}
&{result} Run Command ${cmd}
${value} Get From Dictionary ${result} stdout
Should Be Empty ${value}
Flexible SSH Login
[Arguments] ${user} ${password}=${EMPTY} ${delay}=0.5s
[Documentation] On active SSH session: if given non-empty password,
... do Login, else do Login With Public Key.
${pwd_length} BuiltIn.Get Length ${password}
# ${pwd_length} is guaranteed to be an integer, so we are safe to evaluate
# it as Python expression.
BuiltIn.Run Keyword And Return If ${pwd_length} > 0 SSHLibrary.Login
... ${user} ${password} delay=${delay}
BuiltIn.Run Keyword And Return SSHLibrary.Login With Public Key
... ${user} ${USER_HOME}/.ssh/${SSH_KEY} ${KEYFILE_PASS}
... delay=${delay}
Connect to Controller Node
[Arguments] ${user} ${password} ${ip_address} ${prompt}=$
... ${timeout}=10s
[Documentation] Establish an SSH connection to the controller and return
... the connection id.
${controller_connection} SSHLibrary.Open_Connection
... ${ip_address} prompt=${prompt} timeout=${timeout}
Flexible SSH Login ${user} ${password}
[Return] ${controller_connection}
Open Master Controller Connection
[Documentation] Establish an SSH connection with the master controller
... to start executing the suite.
${master_controller_connection} Connect to Controller Node
... ${CONFIG.credentials.STX_DEPLOY_USER_NAME}
... ${CONFIG.credentials.STX_DEPLOY_USER_PSWD}
... ${CONFIG.general.IP_UNIT_0_ADDRESS}
Set Suite Variable ${master_controller_connection}
log ${master_controller_connection}
Generate Secondary Controller Connection
[Arguments] ${controller}
[Documentation] Establish an SSH connection with the secondary controller
... to keep it alive.
${controller_ip} Set Variable If '${controller}'=='controller-0'
... ${CONFIG.general.IP_UNIT_0_ADDRESS}
... ${CONFIG.general.IP_UNIT_1_ADDRESS}
${secondary_controller_connection} Connect to Controller Node
... ${CONFIG.credentials.STX_DEPLOY_USER_NAME}
... ${CONFIG.credentials.STX_DEPLOY_USER_PSWD}
... ${controller_ip}
Set Suite Variable ${secondary_controller_connection}
log ${secondary_controller_connection}
# - Set Active connection back to master controller
Run Keyword And Return If ${secondary_controller_connection} is not None
... Switch Controller Connection ${master_controller_connection}
... ${secondary_controller_connection}
Switch Controller Connection
[Arguments] ${new_idx} ${old_idx}
[Documentation] Enable an SSH connection to the new active controller and
... source the proper variables.
Switch Connection ${new_idx}
Get Connection ${new_idx}
Run Command whoami
Wait Until Keyword Succeeds 5 min 5 sec Set Env Vars From Openrc
Set Suite Variable ${secondary_controller_connection}
... ${old_idx}
Set Suite Variable ${master_controller_connection} ${new_idx}
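# The previous master connection index is kept as the secondary one, so the
# suite can switch back later without opening a new connection.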
Generate SSH Key On Current Host
[Arguments] ${key_path} ${key_name}
[Documentation] Generates an SSH key on the current host to be used as
... the base for keypair generation.
Run Command ssh-keygen -f ${key_path}/${key_name} -t rsa -P '' True
Create Empty Result Dictionary
[Documentation] Creates an empty dictionary with the required structure
... for the response of an executed command.
&{result_dict} Create Dictionary stdout=${EMPTY}
Set To Dictionary ${result_dict} stderr=${EMPTY}
Set To Dictionary ${result_dict} rc=${EMPTY}
[Return] ${result_dict}
Remove Prompt Line
[Arguments] ${output}
[Documentation] The response of the command execution also includes
... the prompt line (because of the use of Read Until Prompt);
... this keyword deletes that last line and returns a clean output.
${line_to_remove} Get Line ${output} -1
${clean_out} Remove String ${output} ${line_to_remove}
[Return] ${clean_out}
Wait Until Keyword Fails
[Arguments] ${timeout} ${retry} ${error} ${keyword} @{args}
[Documentation] Waits until executed keyword returns the expected error.
Wait Until Keyword Succeeds ${timeout} ${retry}
... Run Keyword And Expect Error ${error} ${keyword} @{args}
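# Illustrative usage (as in Unlock Master Controller):
#    Wait Until Keyword Fails 20 min 20 sec *Socket is closed*
#    ... Run Command whoami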
Convert Response To List
[Arguments] ${result}
[Documentation] Given a response dictionary, get the stdout, split
... it by spaces and return it as a list.
${response} Get From Dictionary ${result} stdout
@{res_in_list} Split String ${response}
[Return] @{res_in_list}
Create Directory On Current Host
[Arguments] ${dir_name} ${dir_path}
[Documentation] Create a directory at the specified location on the host
... that is currently active on the ssh connection.
Run Command mkdir ${dir_path}/${dir_name}
[Return] ${dir_path}/${dir_name}
Start Nodes Virtual
[Documentation] Start VMs that will serve as the Nodes of the system.
@{vms_list} Get All Vms List
: FOR ${vm} IN @{vms_list}
\ Run virsh -c qemu:///system start ${vm}
\ Run virt-manager -c qemu:///system --show-domain-console ${vm}



@ -0,0 +1,145 @@
"""Manages execution of bash commands.
Executes bash commands in the underlying system and formats the output
in a consistent manner.
The functions that belong to this package are the ones that meet this criteria:
- All those that rely on running a command in the shell (bash) to work.
- All those related to printing/formatting messages for the console.
- This module should only include functions that are not related with a
specific application.
Note: Since a lot of functions in this package need to run in a shell, this
package should most of the time only be used on Linux.
"""
from __future__ import print_function
import os
import re
import subprocess
# Defines a color schema for messages.
PURPLE = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
CYAN = '\033[96m'
GREY = '\033[90m'
BLACK = '\033[90m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
DEFAULT = '\033[99m'
END = '\033[0m'
# --------------------------------------------------------
# Functions for printing formatted messages in the console
# --------------------------------------------------------
def message(message_type, msg, end_string=None):
"""Wrapper that provides format to messages based on message type.
The function prints messages of the following type:
'>>> (success) hello world'
'>>> (err) bye bye world'
:param message_type: specifies the type of message
:param msg: the message to be formatted
:param end_string: specifies a different character to end the
message, if None is specified it uses a newline
"""
if message_type == 'err':
print(RED + '>>> (err) ' + END + msg, end=end_string)
elif message_type == 'warn':
print(YELLOW + '>>> (warn) ' + END + msg, end=end_string)
elif message_type == 'info':
print(BLUE + '>>> (info) ' + END + msg, end=end_string)
elif message_type == 'ok':
print(GREEN + '>>> (success) ' + END + msg, end=end_string)
elif message_type == 'statistics':
print(CYAN + '>>> (data) ' + END + msg, end=end_string)
elif message_type == 'cmd':
print(CYAN + '>>> (cmd) ' + END + msg, end=end_string)
elif message_type == 'skip':
print(
BLUE + '>>> (info) ' + END + msg + ' ... [' + YELLOW + 'SKIP' +
END + ']', end=end_string)
else:
raise ValueError('Invalid argument.')
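# Illustrative usage: message('info', 'starting deployment') prints
# ">>> (info) starting deployment" with the ">>> (info)" prefix in blue.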
def load_openrc_env_variables():
"""Loading OpenStack credentials
This function will load environment variables needed to run OpenStack
commands through CLI and will return this variables to robot framework
:returns: dictionary -- with environment variables containing OpenStack
credentials loaded.
"""
command = ['bash', '-c', 'source /etc/nova/openrc && env']
proc = subprocess.Popen(
command, stdout=subprocess.PIPE, executable='/bin/bash',
# universal_newlines makes proc.stdout yield str lines, so that
# line.partition('=') also works under Python 3.
universal_newlines=True)
for line in proc.stdout:
(key, _, value) = line.partition('=')
os.environ[key] = value[:-1]
return dict(os.environ)
# -------------------------------------------
# Functions for running commands in the shell
# -------------------------------------------
def run_command(command, raise_exception=False):
"""Runs a shell command in the host.
:param command: the command to be executed
:param raise_exception: if set to True, an exception is raised if
the command was not executed correctly
:return: a tuple that contains the exit code of the command executed,
and the output message of the command.
"""
proc = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True,
executable='/bin/bash')
output, error = proc.communicate()
output = output.strip() if output else output
error = error.strip() if error else error
if raise_exception and proc.returncode != 0:
raise RuntimeError('{}: {}'.format(command, error or output))
return proc.returncode, error or output
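# Illustrative usage:
#   rc, output = run_command('ls /tmp')
#   # rc is the exit code; output holds stderr if present, otherwise stdout.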
# ---------------------------------------------
# Functions that rely on running shell commands
# ---------------------------------------------
def is_process_running(process, pid_to_exclude=''):
"""Checks if a process is running.
:param process: the process to check. This can either be a PID or the
command used to start the process
:param pid_to_exclude: only int values are accepted. If this param is set,
this function will exclude the pid stored in this variable.
:return:
True: if the process is still running.
False: if the process has finished (was not found)
"""
ps_aux = subprocess.Popen(['ps', 'axw'], stdout=subprocess.PIPE)
for element in ps_aux.stdout:
if re.search(process, element.decode('utf-8')):
pid = int(element.split()[0])
if pid_to_exclude and pid == pid_to_exclude:
continue
else:
return True
return False
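# Illustrative usage: is_process_running('qemu-system') returns True while
# any matching entry is listed by 'ps axw' (optionally excluding one PID).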


@ -0,0 +1,12 @@
clouds:
openstack_helm:
region_name: RegionOne
identity_api_version: 3
endpoint_type: internalURL
auth:
username: 'admin'
password: 'PASS'
project_name: 'admin'
project_domain_name: 'default'
user_domain_name: 'default'
auth_url: 'http://keystone.openstack.svc.cluster.local/v3'
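# Note: 'PASS' above is a placeholder; it is expected to be replaced with the
# Keystone admin password of the deployment before this file is used.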


@ -0,0 +1,139 @@
"""Provides Useful functions for Robot Framework"""
from __future__ import print_function
import configparser
import datetime
import errno
import os
from robot.api import TestData
class Suite(object):
"""Implement a series of suite attributes
    Defines the properties of a suite recognized by Robot Framework.
"""
def __init__(self, name, main_suite_path):
self.name = name
self.main_suite = TestData(source=main_suite_path)
self.path = self.__get_suite_path(self.main_suite)
try:
self.data = TestData(source=self.path)
except TypeError as err:
print('ERROR: Suite {0} not found'.format(self.name))
raise err
def __get_suite_path(self, main_suite):
"""Return path of an specific test suite
Args:
main_suite = main suite test Data
Returns:
found_path = path of the suite found
"""
if main_suite.name == self.name:
return main_suite.source
for child in main_suite.children:
found_path = self.__get_suite_path(child)
if found_path:
return found_path
def get_config():
"""Read configuration file defined on execution directory
Returns:
config = Instance with configuration values parsed
from specified file
"""
config = configparser.ConfigParser()
config.read('stx.config')
return config
def check_results_dir(suite_dir):
"""Check if results directory already exist, if not, create it
Args:
suite_dir = Path to the main suite
Returns:
        results_dir = Directory where results will be stored
"""
results_dir = os.path.join(suite_dir, 'Results')
if not os.path.exists(results_dir):
os.makedirs(results_dir)
return results_dir
def create_output_dir(res_dir, suite_name):
"""Create directory under results to store the execution results
Args:
res_dir = Results dir where the results will be stored
suite_name = Name of the suite under execution
Returns:
out_dir = Path to the dir created with the results
"""
start_time = datetime.datetime.now()
frmt = '%Y%m%d%H%M%S'
out_dir = '{}/{}_{}'.format(res_dir, start_time.strftime(frmt), suite_name)
os.makedirs(out_dir)
return out_dir
def link_latest_run(suite_dir, out_dir):
"""Create a symlink to point to the latest execution results
Args:
suite_dir = Path to the main suite on the execution dir
out_dir = Output dir where the most recent execution results
are stored
Return:
        latest_run = Path of the created symlink
"""
latest_run = os.path.join(suite_dir, 'latest-results')
try:
os.symlink(out_dir, latest_run)
except OSError as err:
if err.errno == errno.EEXIST:
os.remove(latest_run)
os.symlink(out_dir, latest_run)
else:
raise err
return latest_run
def list_suites(suite, tree_format):
"""Print in a readable format the list of suites and test cases
Args:
suite = Specific suite data
tree_format = format to be displayed on stdout
"""
print('[S] {}{}'.format(tree_format, suite.name))
if suite.testcase_table.tests:
tree_format += '.....'
for test in suite.testcase_table:
print('(T) {}{}'.format(tree_format, test.name))
else:
tree_format += '....|'
for child in suite.children:
list_suites(child, tree_format)
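A hypothetical driver sketch showing how these helpers fit together; the suite name and paths are assumptions:

# Hypothetical driver; suite name and paths are assumptions.
main_suite_path = '/path/to/suite'
suite = Suite('Setup', main_suite_path)    # wraps robot.api.TestData
list_suites(suite.data, '')                # print the suite/test tree
results_dir = check_results_dir(main_suite_path)
output_dir = create_output_dir(results_dir, suite.name)
link_latest_run(main_suite_path, output_dir)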

View File

@@ -0,0 +1,137 @@
"""This module provides common functions to set up and use the logger."""
import logging
import logging.config
import logging.handlers
import os
import yaml
def setup_logging_using_config(name, config_file):
"""Sets up a logger according to the provided configuration file.
:param name: the name of the logger, most of the times this should be the
name of the module
:param config_file: the path to the config file to be used to set up the
logger
:return: returns the instance of the logger already configured
"""
with open(config_file, 'r') as file_manager:
config = yaml.safe_load(file_manager.read())
logging.config.dictConfig(config)
# create the logger object
logger = logging.getLogger(name)
return logger
def setup_logging(
name, level='info', log_file='StarlingX.log', root=False,
console_log=True):
"""Sets up a logger according to the desired configuration.
:param name: the name of the logger, most of the times this should be the
name of the module
:param level: the logging level defined for the logger.
Possible values are: notset, debug, info, warn, error, critical.
:param log_file: the path and name of the log file to be used in the logger
    :param root: if set to True, the root logger is configured and used, which
        implies that its configuration is inherited by the whole logger
        hierarchy; use this if you want to see logs from all modules, even
        external libraries. If set to False, the logger configured is the
        module's logger.
:param console_log: if True, the console handler will be added to the
logger, which means that the log will also be shown in the screen,
if False the messages will only be logged to files
:return: returns the instance of the logger already configured
"""
# Determine the correct log level
if level not in ['notset', 'debug', 'info', 'warn', 'error', 'critical']:
level = 'info'
level = getattr(logging, level.upper())
# create the console handler
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
# if the path for the log files does not exist, create it
if len(log_file.rsplit('/', 1)) == 2:
log_path = log_file.rsplit('/', 1)[0]
if not os.path.exists(log_path):
os.makedirs(log_path)
# Create the file handlers
file_handler = logging.handlers.RotatingFileHandler(
log_file,
maxBytes=10485760, # Max file size 10 MB (10 x 1024 x 1024)
backupCount=10 # Number of rotating files
)
file_handler.setLevel(level)
error_log_file = (
'{basename}.error.log'
.format(basename=log_file.replace('.log', ''))
)
error_file_handler = logging.handlers.RotatingFileHandler(
error_log_file,
maxBytes=10485760, # Max file size 10 MB (10 x 1024 x 1024)
backupCount=10 # Number of rotating files
)
error_file_handler.setLevel(logging.ERROR)
# create a formatter
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s',
'%Y-%m-%d %H:%M:%S')
# add the formatter to handlers
console_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
error_file_handler.setFormatter(formatter)
# create the logger object
if root:
logger = logging.getLogger()
logger.name = name
else:
logger = logging.getLogger(name)
# include log messages from Utils/Libraries/Qemu modules (if any)
logging.getLogger('Utils').setLevel(level)
logging.getLogger('Utils').addHandler(file_handler)
logging.getLogger('Utils').addHandler(error_file_handler)
logging.getLogger('Libraries').setLevel(level)
logging.getLogger('Libraries').addHandler(file_handler)
logging.getLogger('Libraries').addHandler(error_file_handler)
logging.getLogger('Qemu').setLevel(level)
logging.getLogger('Qemu').addHandler(file_handler)
logging.getLogger('Qemu').addHandler(error_file_handler)
# add the console handler only if enabled
if console_log:
logging.getLogger('Utils').addHandler(console_handler)
logging.getLogger('StarlingX').addHandler(console_handler)
# set logging level
logger.setLevel(level)
# if for some reason this setup function is called multiple times
# (some applications could do that as a side effect, for example Flask
# applications), then we need to restrict how many handlers are added to
# the logger. The logger is a singleton, so there can be only one, but
# it could have multiple handlers. So if already has a handler(s) (from a
# previous call) then just use that/those one(s) and don't add more
# handlers.
if not logger.handlers:
# add handlers to the requester module logger
logger.addHandler(file_handler)
logger.addHandler(error_file_handler)
# add the console handler only if enabled
if console_log:
logger.addHandler(console_handler)
# initialize the log with a long line so it is easier to identify when
# one log finishes and another one begins.
logger.info(
'--------------------------------------------------------------')
return logger
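A short usage sketch; the logger name and log file path are assumptions:

# Hypothetical usage; logger name and log path are assumptions.
LOG = setup_logging(
    'Libraries.iso_setup', level='debug',
    log_file='Results/latest/iso_setup.log', console_log=True)
LOG.info('installation started')
LOG.error('this message also lands in the rotating .error.log file')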

View File

@@ -0,0 +1,112 @@
"""Provides different network functions"""
import logging
import subprocess
from bash import bash
from elevate import elevate
from ifparser import Ifcfg
from pynetlinux import ifconfig
from pynetlinux import brctl
from Libraries.common import update_config_ini
LOG = logging.getLogger(__name__)
def delete_network_interfaces():
"""Delete network interfaces
This function performs a clean up for the following network interfaces
stxbr[1-4]
"""
    # the elevate module re-launches the current process with root/admin
    # privileges using one of the following mechanisms: sudo (Linux, macOS),
    # i.e. the process becomes root
elevate(graphical=False)
ifdata = Ifcfg(subprocess.check_output(['ifconfig', '-a']))
    # Destroy the NAT network if it exists
try:
bash('sudo virsh net-destroy {}'.format('stx-nat'))
bash('sudo virsh net-undefine {}'.format('stx-nat'))
except IOError:
LOG.warn('NAT network not found')
for interface in range(1, 5):
current_interface = 'stxbr{}'.format(interface)
if current_interface in ifdata.interfaces:
# the network interface exists
net_object = ifdata.get_interface(current_interface)
net_up = net_object.get_values().get('UP')
net_running = net_object.get_values().get('RUNNING')
if net_up or net_running:
# the network interface is up or running
try:
# down and delete the network interface
ifconfig.Interface(current_interface).down()
brctl.Bridge(current_interface).delete()
except IOError:
LOG.warn('[Errno 19] No such device: {}'.format(
current_interface))
def configure_network_interfaces():
"""Configure network interfaces
    This function configures the following network interfaces: stxbr[1-4]
"""
for interface in range(1, 5):
current_interface = 'stxbr{}'.format(interface)
# adding the network interface
try:
brctl.addbr(current_interface)
except IOError:
LOG.warn('[Errno 17] File exists {}'.format(current_interface))
networks = ['stxbr1 10.10.10.1/24', 'stxbr2 192.168.204.1/24',
'stxbr3', 'stxbr4']
for net in networks:
eval_cmd = bash('sudo ifconfig {} up'.format(net))
if 'ERROR' in eval_cmd.stderr:
LOG.error(eval_cmd.stderr)
raise EnvironmentError(eval_cmd.stderr)
# setting the ip tables
iptables = ('sudo iptables -t nat -A POSTROUTING -s 10.10.10.0/24 -j '
'MASQUERADE')
eval_cmd = bash(iptables)
if 'ERROR' in eval_cmd.stderr:
LOG.error(eval_cmd.stderr)
raise EnvironmentError(eval_cmd.stderr)
def update_networks_config(
network_interfaces, configuration_file, configuration_type):
"""Update a config.ini with the networks from the controller
:param network_interfaces: the network interfaces from the controller
:param configuration_file: the absolute path to the config.ini to be
updated
:param configuration_type: the type of configuration to be updated
"""
if configuration_type == 'simplex':
update_config_ini(config_ini=configuration_file,
config_section='logical_interface',
OAM=network_interfaces[0])
else:
update_config_ini(config_ini=configuration_file,
config_section='logical_interface',
MGMT=network_interfaces[1])
update_config_ini(config_ini=configuration_file,
config_section='logical_interface',
OAM=network_interfaces[0])
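A hypothetical call sequence for a non-simplex setup; the interface names and the config.ini path are assumptions:

# Hypothetical example; interface names and config path are assumptions.
delete_network_interfaces()
configure_network_interfaces()
update_networks_config(
    network_interfaces=['enp0s3', 'enp0s8'],
    configuration_file='Config/config.ini',
    configuration_type='duplex')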

View File

@@ -0,0 +1,17 @@
heat_template_version: pike
description: Launch an instance with Cirros image.
parameters:
NetID:
type: string
description: Network ID to use for the instance.
resources:
server:
type: OS::Nova::Server
properties:
image: cirros
flavor: f1.small
key_name:
networks:
- network: { get_param: NetID }
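A template like this is typically launched through the openstack CLI; a hedged example reusing the run_command helper from this suite (the template file name, stack name and network name are assumptions):

# Hypothetical example; file, stack and network names are assumptions.
from Utils import bash_utils as bash

bash.run_command(
    'openstack stack create -t stack_cirros.yaml '
    '--parameter NetID=private-net cirros-stack',
    raise_exception=True)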

View File

@@ -0,0 +1,38 @@
apiVersion: v1
kind: Pod
metadata:
name: testpod
spec:
containers:
- name: test-container
image: k8s.gcr.io/busybox
command: [ "sh", "-c"]
args:
- while true; do
echo -en '\n';
printenv MY_NODE_NAME MY_POD_NAME MY_POD_NAMESPACE;
printenv MY_POD_IP MY_POD_SERVICE_ACCOUNT;
sleep 10;
done;
env:
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
restartPolicy: Never
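A pod spec like this is normally exercised with kubectl; a hedged sketch (the manifest file name is an assumption):

# Hypothetical example; the manifest file name is an assumption.
from Utils import bash_utils as bash

bash.run_command('kubectl apply -f testpod.yaml', raise_exception=True)
bash.run_command(
    'kubectl wait --for=condition=Ready pod/testpod --timeout=120s')
_, logs = bash.run_command('kubectl logs testpod')
print(logs)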

View File

@@ -0,0 +1,154 @@
"""Provides a library of useful utilities.
This module provides a list of general purpose utilities. Those functions that
are part of a larger domain, for example functions related to networking,
should be provided by a different module.
This module should only include functions that are not related with a
specific application, in other words these methods should be application
agnostic.
"""
from __future__ import print_function
import os
import timeit
import pwd
from bash import bash as ebash
from Config import config
import Utils.bash_utils as bash
def find_owner(element):
"""Find the owner of a file or folder
:param element: which can be a file or folder to check
:return
        - the user that owns the file or folder
"""
return pwd.getpwuid(os.stat(element).st_uid).pw_name
def isdir(path, sudo=True):
"""Validates if a directory exist in a host.
:param path: the path of the directory to be validated
:param sudo: this needs to be set to True for directories that require
root permission
:return: True if the directory exists, False otherwise
"""
status, _ = bash.run_command(
'{prefix}test -d {path}'.format(
path=path, prefix='sudo ' if sudo else ''))
    return status == 0
def isfile(path, sudo=True):
"""Validates if a file exist in a host.
:param path: the absolute path of the file to be validated
:param sudo: this needs to be set to True for files that require
root permission
:return: True if the file exists, False otherwise
"""
status, _ = bash.run_command(
'{prefix}test -f {path}'.format(
path=path, prefix='sudo ' if sudo else ''))
    return status == 0
def timer(action, print_elapsed_time=True):
"""Function that works as a timer, with a start/stop button.
:param action: the action to perform, the valid options are:
- start: start a counter for an operation
        - stop: stop the current timer
:param print_elapsed_time: if set to False the message is not printed to
console, only returned
:return: the elapsed_time string variable
"""
elapsed_time = 0
if action.lower() == 'start':
start = timeit.default_timer()
os.environ['START_TIME'] = str(start)
elif action.lower() == 'stop':
if 'START_TIME' not in os.environ:
bash.message('err', 'you need to start the timer first')
return None
stop = timeit.default_timer()
total_time = stop - float(os.environ['START_TIME'])
del os.environ['START_TIME']
# output running time in a nice format.
minutes, seconds = divmod(total_time, 60)
hours, minutes = divmod(minutes, 60)
elapsed_time = 'elapsed time ({h}h:{m}m:{s}s)'.format(
h=0 if round(hours, 2) == 0.0 else round(hours, 2),
m=0 if round(minutes, 2) == 0.0 else round(minutes, 2),
s=round(seconds, 2))
if print_elapsed_time:
bash.message('info', elapsed_time)
else:
bash.message('err', '{0}: not allowed'.format(action))
return elapsed_time
def clean_qemu_environment():
"""Clean Qemu/Libvirt environment
    This function cleans the environment on the current host by:
    1. shutting down the VMs that are currently running
    2. removing them from Virtual Machine Manager
    3. deleting their disk images
"""
images_path = '/var/lib/libvirt/images'
vms = ebash(
"virsh list --all | awk 'NR>2 {print $2}'").stdout.strip().split()
partitions = ebash('sudo ls {} | grep .img$'.format(
images_path)).stdout.split()
for vm in vms:
        # check whether the current VM is running so it can be shut down
cmd = ebash('sudo virsh domstate {}'.format(vm))
stdout = cmd.stdout.strip()
stderr = cmd.stderr.strip()
if stdout == 'running' and 'failed to get domain' not in stderr:
# the vm is running
ebash('sudo virsh destroy {}'.format(vm))
# removing controller/compute from Virtual Machine Manager
ebash(
'sudo virsh undefine {} --remove-all-storage --snapshots-metadata'
.format(vm))
for partition in partitions:
ebash('sudo rm {}/{}'.format(images_path, partition))
def qemu_configuration_files():
"""Custom Qemu configuration files"""
xml = config.get('qemu', 'XML')
config_file = config.get('qemu', 'CONFIG_FILE')
if os.path.isfile(xml):
# deleting default libvirt networks configuration
ebash('sudo rm -rf {}'.format(xml))
parameters = ['user = "root"', 'group = "root"']
for param in parameters:
stdout = ebash("sudo cat {0} | grep -w '^{1}'".format(
config_file, param)).stdout
if not stdout:
            # the param is not yet in config_file
ebash("echo '{0}' | sudo tee -a {1}".format(param, config_file))

View File

@@ -0,0 +1,76 @@
"""Watch for events on files
This module watch for events in a specific path and print the last line in
console.
"""
from __future__ import print_function
import os
import sys
import time
from bash import bash
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class CustomHandler(PatternMatchingEventHandler):
"""Watch for events on files"""
patterns = ['*console.txt']
@staticmethod
def process(event):
"""Process the event type
:param event: this param has the following attributes
event.event_type
'modified' | 'created' | 'moved' | 'deleted'
event.is_directory
True | False
event.src_path
path/to/observed/file
"""
# the file will be processed there
# print event.src_path, event.event_type # print now only for debug
last_line = bash('tail -2 {}'.format(event.src_path))
if 'LAST_CONSOLE_LINE' not in os.environ:
os.environ['LAST_CONSOLE_LINE'] = last_line.stdout
print('{}'.format(last_line.stdout))
elif os.environ.get('LAST_CONSOLE_LINE') != last_line.stdout:
os.environ['LAST_CONSOLE_LINE'] = last_line.stdout
print('{}'.format(last_line.stdout))
def on_modified(self, event):
"""Handle on modified events
        If a file matching the patterns declared in this class is modified,
        this function calls the process function.
"""
self.process(event)
def on_created(self, event):
"""Handle on created events
        If a file matching the patterns declared in this class is created,
        this function calls the process function.
"""
self.process(event)
if __name__ == '__main__':
ARGS = sys.argv[1:]
OBSERVER = Observer()
OBSERVER.schedule(CustomHandler(), path=ARGS[0] if ARGS else '.')
OBSERVER.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
OBSERVER.stop()
OBSERVER.join()

View File

@@ -0,0 +1,359 @@
"""
Library of variables commonly used for Horizon tests.
This library contains global variables and XPATHS needed to get web elements
of Horizon web page to navigate through it and use or modify Controllers and
Computes.
Authors:
- Juan Carlos Alonso <juan.carlos.alonso@intel.com>
- Elio Martinez <elio.martinez.monroy@intel.com>
- Juan Pablo Gomez <juan.p.gomez@intel.com>
"""
import getpass
from Config import config
CURRENT_USER = getpass.getuser()
# Default TIMEOUT variable
TIMEOUT = 10
# Variables from configuration file
IP_CONTROLLER_0 = config.get('general', 'IP_UNIT_0_ADDRESS')
IP_CONTROLLER_1 = config.get('general', 'IP_UNIT_1_ADDRESS')
CIRROS_FILE = config.get('general', 'CIRROS_FILE')
CENTOS_FILE = config.get('general', 'CENTOS_FILE')
UBUNTU_FILE = config.get('general', 'UBUNTU_FILE')
WINDOWS_FILE = config.get('general', 'WINDOWS_FILE')
APP_TARBALL = config.get('general', 'APP_TARBALL')
CONTROLLER_TMP_IP = config.get('iso_installer', 'CONTROLLER_TMP_IP')
CIRROS_IMAGE = (
'/home/{USER}/{IMAGE}'.format(USER=CURRENT_USER, IMAGE=CIRROS_FILE))
CENTOS_IMAGE = (
'/home/{USER}/{IMAGE}'.format(USER=CURRENT_USER, IMAGE=CENTOS_FILE))
UBUNTU_IMAGE = (
'/home/{USER}/{IMAGE}'.format(USER=CURRENT_USER, IMAGE=UBUNTU_FILE))
WINDOWS_IMAGE = (
'/home/{USER}/{IMAGE}'.format(USER=CURRENT_USER, IMAGE=WINDOWS_FILE))
APP_TARBALL_FILE = ('{FILE}'.format(FILE=APP_TARBALL))
HORIZON_URL = ('http://{IP}/'.format(IP=IP_CONTROLLER_0))
HORIZON_USERNAME = config.get('dashboard', 'HORIZON_USERNAME')
HORIZON_PASSWORD = config.get('dashboard', 'HORIZON_PASSWORD')
FF_BROWSER = config.get('dashboard', 'BROWSER')
PROFILE = config.get('dashboard', 'PROFILE')
FF_PROFILE = (
'/home/{USER}/.mozilla/firefox/{BROWSER_PROFILE}'
.format(USER=CURRENT_USER, BROWSER_PROFILE=PROFILE))
CLI_USER_NAME = config.get('credentials', 'STX_DEPLOY_USER_NAME')
CLI_USER_PASS = config.get('credentials', 'STX_DEPLOY_USER_PSWD')
STX_ISO_FILE = config.get('general', 'STX_ISO_FILE')
# Variables for Horizon log in page
USERNAME_ID_FIELD = 'id_username'
PASSWORD_ID_FIELD = 'id_password'
LOGIN_ID_BUTTON = 'loginBtn'
HORIZON_PAGE_TITLE = 'Instance Overview - Akraino Edge Stack'
HOST_INV_PAGE_TITLE = 'Host Inventory - Akraino Edge Stack'
# XPATHS for PROJECT Menu
XPATH_PROJECT = (
'//*[@class="nav nav-pills nav-stacked"]//a[contains(., \'Project\')]')
XPATH_PROJECT_API = (
'//*[@id="sidebar-accordion-project-default"]'
'//a[contains(., \'API Access\')]')
XPATH_PROJECT_COMPUTE = (
'//*[@id="sidebar-accordion-project"]//a[contains(., \'Compute\')]')
XPATH_PROJECT_COMPUTE_OVERVIEW = (
'//*[@id="sidebar-accordion-project-compute"]'
'//a[contains(., \'Overview\')]')
XPATH_PROJECT_COMPUTE_INSTANCES = (
'//*[@id="sidebar-accordion-project-compute"]'
'//a[contains(., \'Instances\')]')
XPATH_PROJECT_COMPUTE_SERVER_GROUPS = (
'//*[@id="sidebar-accordion-project-compute"]'
'//a[contains(., \'Server Groups\')]')
XPATH_PROJECT_COMPUTE_IMAGES = (
'//*[@id="sidebar-accordion-project-compute"]//a[contains(., \'Images\')]')
XPATH_PROJECT_COMPUTE_KEYPARS = (
'//*[@id="sidebar-accordion-project-compute"]'
'//a[contains(., \'Key Pairs\')]')
XPATH_PROJECT_NETWORK = (
'//*[@id="sidebar-accordion-project"]//a[contains(., \'Network\')]')
XPATH_PROJECT_NETWORK_TOPOLOGY = (
'//*[@id="sidebar-accordion-project-network"]'
'//a[contains(., \'Network Topology\')]')
XPATH_PROJECT_NETWORK_NETWORKS = (
'//*[@id="sidebar-accordion-project-network"]'
'//a[contains(., \'Networks\')]')
XPATH_PROJECT_NETWORK_ROUTERS = (
'//*[@id="sidebar-accordion-project-network"]'
'//a[contains(., \'Routers\')]')
XPATH_PROJECT_NETWORK_SECURITY = (
'//*[@id="sidebar-accordion-project-network"]'
'//a[contains(., \'Security Groups\')]')
XPATH_PROJECT_NETWORK_FLOATING = (
'//*[@id="sidebar-accordion-project-network"]'
'//a[contains(., \'Floating IPs\')]')
XPATH_PROJECT_ORCHESTRATION = (
'//*[@id="sidebar-accordion-project"]//a[contains(., \'Orchestration\')]')
XPATH_PROJECT_ORCHESTRATION_STACKS = (
'//*[@id="sidebar-accordion-project-orchestration"]'
'//a[contains(., \'Stacks\')]')
XPATH_PROJECT_ORCHESTRATION_RESOURCES = (
'//*[@id="sidebar-accordion-project-orchestration"]'
'//a[contains(., \'Resource Types\')]')
XPATH_PROJECT_ORCHESTRATION_TEMPLATE = (
'//*[@id="sidebar-accordion-project-orchestration"]'
'//a[contains(., \'Template Versions\')]')
# XPATHS for ADMIN Menu
XPATH_ADMIN = (
'//*[@class="nav nav-pills nav-stacked"]//a[contains(., \'Admin\')]')
XPATH_ADMIN_OVERVIEW = (
'//*[@id="sidebar-accordion-admin-default"]//a[contains(., \'Overview\')]')
XPATH_ADMIN_PLATFORM = (
'//*[@id="sidebar-accordion-admin"]//a[contains(., \'Platform\')]')
XPATH_ADMIN_PLATFORM_FAULT = (
'//*[@id="sidebar-accordion-admin-platform"]'
'//a[contains(., \'Fault Management\')]')
XPATH_ADMIN_PLATFORM_SOFTWARE = (
'//*[@id="sidebar-accordion-admin-platform"]'
'//a[contains(., \'Software Management\')]')
XPATH_ADMIN_PLATFORM_HOST = (
'//*[@id="sidebar-accordion-admin-platform"]'
'//a[contains(., \'Host Inventory\')]')
XPATH_ADMIN_PLATFORM_PROVIDER = (
'//*[@id="sidebar-accordion-admin-platform"]'
'//a[contains(., \'Provider Networks\')]')
XPATH_ADMIN_PLATFORM_TOPOLOGY = (
'//*[@id="sidebar-accordion-admin-platform"]'
'//a[contains(., \'Provider Network Topology\')]')
XPATH_ADMIN_PLATFORM_STORAGE = (
'//*[@id="sidebar-accordion-admin-platform"]'
'//a[contains(., \'Storage Overview\')]')
XPATH_ADMIN_PLATFORM_SYSTEM = (
'//*[@id="sidebar-accordion-admin-platform"]'
'//a[contains(., \'System Configuration\')]')
XPATH_ADMIN_COMPUTE = (
'//*[@id="sidebar-accordion-admin"]//a[contains(., \'Compute\')]')
XPATH_ADMIN_COMPUTE_SERVER = (
'//*[@id="sidebar-accordion-admin-compute"]'
'//a[contains(., \'Server Groups\')]')
XPATH_ADMIN_COMPUTE_HYPERVISORS = (
'//*[@id="sidebar-accordion-admin-compute"]'
'//a[contains(., \'Hypervisors\')]')
XPATH_ADMIN_COMPUTE_HOST = (
'//*[@id="sidebar-accordion-admin-compute"]'
'//a[contains(., \'Host Aggregates\')]')
XPATH_ADMIN_COMPUTE_INSTANCES = (
'//*[@id="sidebar-accordion-admin-compute"]'
'//a[contains(., \'Instances\')]')
XPATH_ADMIN_COMPUTE_FLAVORS = (
'//*[@id="sidebar-accordion-admin-compute"]//a[contains(., \'Flavors\')]')
XPATH_ADMIN_COMPUTE_IMAGES = (
'//*[@id="sidebar-accordion-admin-compute"]//a[contains(., \'Images\')]')
XPATH_ADMIN_NETWORK = '{0}admin/networks/'.format(HORIZON_URL)
XPATH_ADMIN_NETWORK_NETWORKS = (
'//*[@id="sidebar-accordion-admin-network"]//a[contains(., \'Networks\')]')
XPATH_ADMIN_NETWORK_ROUTERS = (
'//*[@id="sidebar-accordion-admin-network"]//a[contains(., \'Routers\')]')
XPATH_ADMIN_NETWORK_FLOATING = (
'//*[@id="sidebar-accordion-admin-network"]'
'//a[contains(., \'Floating IPs\')]')
XPATH_ADMIN_SYSTEM = '{0}admin/defaults/'.format(HORIZON_URL)
XPATH_PROVIDER_NET_TOPOLOGY = '{0}admin/host_topology/'.format(HORIZON_URL)
XPATH_ADMIN_SYSTEM_DEFAULTS = (
'//*[@id="sidebar-accordion-admin-admin"]//a[contains(., \'Defaults\')]')
XPATH_ADMIN_SYSTEM_METADATA = (
'//*[@id="sidebar-accordion-admin-admin"]'
'//a[contains(., \'Metadata Definitions\')]')
XPATH_ADMIN_SYSTEM_SYSTEM = (
'//*[@id="sidebar-accordion-admin-admin"]'
'//a[contains(., \'System Information\')]')
XPATH_CHOOSE_IMAGE = '//button[contains(., \'Browse...\')]'
# XPATHS for IDENTITY Menu
XPATH_IDENTITY = (
'//*[@class="nav nav-pills nav-stacked"]//a[contains(., \'Identity\')]')
XPATH_IDENTITY_PROJECTS = (
'//*[@id="sidebar-accordion-identity"]//a[contains(., \'Projects\')]')
XPATH_IDENTITY_USERS = (
'//*[@id="sidebar-accordion-identity"]//a[contains(., \'Users\')]')
XPATH_IDENTITY_GROUPS = (
'//*[@id="sidebar-accordion-identity"]//a[contains(., \'Groups\')]')
XPATH_IDENTITY_ROLES = (
'//*[@id="sidebar-accordion-identity"]//a[contains(., \'Roles\')]')
XPATH_FLAVOR_UPDATE_METADATA_CIRROS_ACTION = (
'//table[@id="flavors"]//tr[contains(.,\'cirros-generic\')]'
'//li[contains(., \'Update Metadata\')]')
XPATH_FLV_SPECIFIC_METADATA_CIRROS_ACTION = (
'//table[@id="flavors"]//tr[contains(.,\'cirros-configurable\')]'
'//li[contains(., \'Update Metadata\')]')
XPATH_FLAVOR_UPDATE_METADATA_CIRROS = (
'//table[@id="flavors"]//tr[contains(., \'cirros-generic\')]'
'//*[@class="btn btn-default btn-sm dropdown-toggle"]')
XPATH_FLAVOR_SPECIFIC_METADATA_CIRROS = (
'//table[@id="flavors"]//tr[contains(.,\'cirros-configurable\')]'
'//*[@class="btn btn-default btn-sm dropdown-toggle"]')
XPATH_FLAVOR_UPDATE_METADATA_CENTOS = (
'//table[@id="flavors"]//tr[contains(., \'centos-generic\')]'
'//*[@class="btn btn-default btn-sm dropdown-toggle"]')
XPATH_FLAVOR_SPECIFIC_METADATA_CENTOS = (
'//table[@id="flavors"]//tr[contains(., \'centos-configurable\')]'
'//*[@class="btn btn-default btn-sm dropdown-toggle"]')
XPATH_FLAVOR_UPDATE_METADATA_UBUNTU = (
'//table[@id="flavors"]//tr[contains(., \'ubuntu-generic\')]'
'//*[@class="btn btn-default btn-sm dropdown-toggle"]')
XPATH_FLAVOR_SPECIFIC_METADATA_UBUNTU = (
'//table[@id="flavors"]//tr[contains(., \'ubuntu-configurable\')]'
'//*[@class="btn btn-default btn-sm dropdown-toggle"]')
XPATH_FLAVOR_METADATA_OPTION = (
'//select[@class="form-control ng-pristine ng-valid ng-scope ng-not-empty '
'ng-valid-required ng-touched"]')
XPATH_FLAVOR_UPDATE_METADATA_CENTOS_ACTION = (
'//table[@id="flavors"]//tr[contains(.,\'centos-generic\')]'
'//li[contains(., \'Update Metadata\')]')
XPATH_FLV_SPECIFIC_METADATA_CENTOS_ACTION = (
'//table[@id="flavors"]//tr[contains(.,\'centos-configurable\')]'
'//li[contains(., \'Update Metadata\')]')
XPATH_FLAVOR_UPDATE_METADATA_UBUNTU_ACTION = (
'//table[@id="flavors"]//tr[contains(.,\'ubuntu-generic\')]'
'//li[contains(., \'Update Metadata\')]')
XPATH_FLV_SPECIFIC_METADATA_UBUNTU_ACTION = (
'//table[@id="flavors"]//tr[contains(.,\'ubuntu-configurable\')]'
'//li[contains(., \'Update Metadata\')]')
XPATH_INSTANCE_CONSOLE = (
'//*[@class="dropdown-menu dropdown-menu-right row_actions"]'
'//a[contains(., \'Console\')]')
XPATH_OPEN_INSTANCE_CONSOLE = (
'//*[@class="alert alert-info"]'
'//a[contains(., \'Click here to show only console\')]')
XPATH_VM_ERROR_LOCATOR = (
'//table[@id="instances"]//tr[contains(., \'vm2\')]'
'//td[contains(., \'Error\')]')
XPATH_RANGE_CONDITION_HEAT = (
'//table[@id="provider_networks"]//tr[contains(., \'providernet-b\')]'
'//td[contains(., \'100-400\')]')
XPATH_GET_VM_COMPUTE = (
'//*[@id="instances"]/tbody//tr[contains(., \'Heart_beat_disabled\')]'
'//*[@class="sortable nowrap-col normal_column"]')
XPATH_METADATA_FILTER = '//div[@class="has-feedback"]'
XPATH_PAUSE_UBUNTU = (
'//table[@id="instances"]//tr[contains(., \'ubuntu-configurable\')]'
'//td[contains(., \'Paused\')]')
XPATH_ACTIVE_UBUNTU = (
'//table[@id="instances"]//tr[contains(., \'ubuntu-configurable\')]'
'//td[contains(., \'Active\')]')
XPATH_ACTION_UBUNTU_CHOOSE = (
'//table[@id="instances"]//tr[contains(., \'ubuntu-configurable\')]'
'//*[@class="btn btn-default btn-sm dropdown-toggle"]')
XPATH_ACTION_VM3_PAUSE = (
'//table[@id="instances"]//tr[contains(., \'centos-configurable\')]'
'//li[contains(., \'Pause Instance\')]')
XPATH_ACTION_VM3_RESUME = (
'//table[@id="instances"]//tr[contains(., \'centos-configurable\')]'
'//li[contains(., \'Resume Instance\')]')
XPATH_ACTION_UBUNTU_RESUME = (
'//table[@id="instances"]//tr[contains(., \'ubuntu-configurable\')]'
'//li[contains(., \'Resume Instance\')]')
XPATH_IMG_SPECIFIC_METADATA_CIRROS_ACTION = (
'//table[@id="images"]//tr[contains(., \'cirros-configurable\')]'
'//li[contains(., \'Update Metadata\')]')
XPATH_IMAGE_SHARED_POLICY_DELETE_DESC1 = (
'//*[@class="fa fa-minus"]')
XPATH_IMAGE_SHARED_POLICY_DELETE_DESC2 = (
'//*[@class="list-group-item ng-scope light-stripe"]'
'//*[@class="fa fa-minus"]')
XPATH_IMAGE_SHARED_POLICY_SELECT = (
'//*[@class="input-group input-group-sm ng-scope"]'
'//option[@label=\'shared\']')
XPATH_IMAGE_SPECIFIC_METADATA_CIRROS = (
'//table[@id="images"]//tr[contains(., \'cirros-configurable\')]'
'//*[@class="btn btn-default btn-sm dropdown-toggle"]')
ADDR_HOST_INVENTORY = '{0}admin/inventory/'.format(HORIZON_URL)
# BUTTONS for static Id's
BUTTON_PROVIDER_NET_TYPE = '//select[@id="id_type"]/option[@value=\'vlan\']'
BUTTON_CREATE_PROVIDER_NET_LAST = '//*[@class="btn btn-primary pull-right"]'
BUTTON_SEGMENTATION_RANGE = (
'//*[@class="btn data-table-action ajax-modal btn-edit"]')
BUTTON_SEGMENTATION_RANGE_2 = (
'//*[@class="sortable anchor normal_column"]'
'//*[contains(., \'providernet-b\')]')
BUTTON_PROJECT_RANGE = (
'//select[@id="id_tenant_id"]//option[contains(., \'admin\')]')
BUTTON_SEGMENTATION_RANGE_ACCEPT = '//*[@class="btn btn-primary pull-right"]'
BUTTON_CREATE_FLAVOR_ACCEPT = '//*[@value="Create Flavor"]'
BUTTON_UNLOCK_COMPUTE0 = (
'//table[@id="hostscompute"]//tr[contains(., \'compute-0\')]'
'//*[@class="btn btn-default btn-sm dropdown-toggle"]')
BUTTON_UNLOCK_COMPUTE0_ACTION = 'hostscompute__row_3__action_unlock'
BUTTON_LOCK_COMPUTE0 = (
'//table[@id="hostscompute"]//tr[contains(., \'compute-0\')]'
'//*[@class="btn btn-default btn-sm dropdown-toggle"]')
BUTTON_LOCK_COMPUTE0_ACTION = 'hostscompute__row_3__action_lock'
XPATH_COMPUTE_0_LOCKED = (
'//table[@id="hostscompute"]//tr[contains(., \'compute-0\')]'
'//td[contains(., \'Locked\')]')
XPATH_COMPUTE_0_STATE = (
'//table[@id="hostscompute"]//tr[contains(., \'compute-0\')]'
'//td[contains(., \'Available\')]')
BUTTON_CTRL_ALT_DEL = '//*[@id="sendCtrlAltDelButton"]'
BUTTON_RANGE_HEAT = (
'//table[@id="provider_networks"]//tr[contains(., \'providernet-b\')]'
'//*[@class="btn btn-default btn-sm dropdown-toggle"]')
BUTTON_RANGE_CREATE = (
'//*[@class="table_actions clearfix"]//*[contains(., \'Create Range\')]')
BUTTON_SECOND_FLAVOR = (
'//tr[contains(., \'m2.sanity\')]//*[@class="btn btn-sm btn-default"]')
BUTTON_IMAGE_UPDATE_METADATA = (
'//table[@class="table table-striped table-rsp table-detail modern'
' ng-scope"]//tr[contains(., \'cirros_sharedpolicy\')]'
'//*[@class="btn btn-default"]')
BUTTON_CIRROS_FLV_HEARTBEAT_ARROW = (
'//tr[contains(., \'cirros-heartbeat\')]//*[@class="btn btn-sm '
'btn-default"]')
# ADDR field for specific uses in go to function
ADDR_PROVIDERNETS_PATH = '{0}admin/providernets/'.format(HORIZON_URL)
ADDR_HOST_COMPUTE_DETAIL_0 = (
'{0}admin/inventory/3/detail/'.format(HORIZON_URL))
ADDR_HOST_COMPUTE_DETAIL_1 = (
'{0}admin/inventory/2/detail/'.format(HORIZON_URL))
ADDR_ADMIN_FLAVORS = '{0}admin/flavors/'.format(HORIZON_URL)
ADDR_ADMIN_IMAGES = '{0}admin/images/'.format(HORIZON_URL)
ADDR_PROJECT_NETWORK = '{0}project/networks/'.format(HORIZON_URL)
ADDR_PROJECT_INSTANCES = '{0}project/instances/'.format(HORIZON_URL)
ADDR_ADMIN_INSTANCES = '{0}admin/instances/'.format(HORIZON_URL)
# EDIT text parameters for special fields
EDIT_NOVA_LOCAL_PARAMETERS = (
'//*[@class="sortable anchor normal_column"]'
'//a[contains(., \'nova-local\')]')
EDIT_LOCAL_VOLUME_GROUP_PARAMETER = (
'//select[@id="id_instance_backing"]'
'/option[contains(., \'Local RAW LVM backed\')]')
PROGRESS_BAR = '//*[@class="progress-text horizon-loading-bar"]'
LOADING_ICON = '//*[@class="modal loading in"]'
# CSS Selectors for specific cases
CSS_CTRL_ALT_DEL = 'sendCtrlAltDelButton'
# CLI Variables
VM_SET_ERROR_FLAG = 'openstack server set --state error'
NOVA_ACTION_SET_ERROR_FLAG2 = (
'openstack server set --state error vm-cirros-configurable-0')
VM_SET_ACTIVE_FLAG = 'openstack server set --state active'
SUSPEND_INSTANCE = 'openstack server suspend vm-cirros-configurable-0'
RESUME_INSTANCE = 'openstack server resume vm-cirros-configurable-0'
CEILOMETER_IMAGE_COMMAND = 'ceilometer statistics -m image.size'

View File

@@ -0,0 +1,32 @@
"""Provides a library of useful utilities for Robot Framework"""
import configparser
def get_variables(var_name, config_file):
"""Get variables from a config.ini
    This function parses a config.ini file and returns a dict with its values
    so they can be used as Robot Framework variables
    :param var_name: the prefix used to reference the variables in robot, e.g:
*** Settings ***
Variables Variables/ConfigInit.py Config %{PYTHONPATH}/Config/config.ini
*** Variables ***
${kernel_option} ${CONFIG.iso_installer.KERNEL_OPTION}
:param config_file: the config.ini to parse
:return
        - variables: the dict with all values from config.ini
"""
configurations = configparser.ConfigParser()
configurations.read(config_file)
variables = dict()
for section in configurations.sections():
for key, value in configurations.items(section):
var = '{}.{}.{}'.format(var_name, section, key)
variables[var] = str(value)
return variables
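Outside of Robot the same helper can be exercised directly; a minimal sketch, assuming a config.ini path:

# Hypothetical direct usage; the config path is an assumption.
variables = get_variables('Config', 'Config/config.ini')
print(variables.get('Config.iso_installer.KERNEL_OPTION'))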