add performance tests
Description: reasons.py: added duplex_system reason heat_helper.py: replaced 'is' with '==' host_helper.py : in modify_host_memory and get_host_memories converted key to str from int unused variables in string format network_helper.py: in add_icmp_and_tcp_rules removed "protocol" and added egress for icmp unused variables in string format replaced 'is' with '==' security_helper.py: unused variables in string format storage_helper.py: replaced 'is' with '==' unused variables in string format pytest.ini: added robotperformance marker performance: added performance directory containing performance tests fixture_resources.py: added missing ',' in _RESOURCE_TYPES list pre_checks_and_configs.py: added no_duplex telnet.py: replace 'NotImplemented' with 'NotImplementedError' parse_log.py: replaced 'is' with '==' Signed-off-by: George Postolache <george.postolache@intel.com> Change-Id: I32e84aac33102e57f4b0eb28d9aca0f6684e6b64
This commit is contained in:
parent
29654e6485
commit
f25ebd7b0d
@ -38,4 +38,5 @@ class SkipSysType:
|
|||||||
SMALL_FOOTPRINT = "Skip for small footprint lab"
|
SMALL_FOOTPRINT = "Skip for small footprint lab"
|
||||||
LESS_THAN_TWO_CONTROLLERS = "Less than two controllers on system"
|
LESS_THAN_TWO_CONTROLLERS = "Less than two controllers on system"
|
||||||
SIMPLEX_SYSTEM = 'Not applicable to Simplex system'
|
SIMPLEX_SYSTEM = 'Not applicable to Simplex system'
|
||||||
|
DUPLEX_SYSTEM = 'Not applicable to Duplex system'
|
||||||
SIMPLEX_ONLY = 'Only applicable to Simplex system'
|
SIMPLEX_ONLY = 'Only applicable to Simplex system'
|
||||||
|
@ -248,12 +248,12 @@ def get_heat_params(param_name=None):
|
|||||||
Returns (str): return None if failure or the val for the given param
|
Returns (str): return None if failure or the val for the given param
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if param_name is 'NETWORK':
|
if param_name == 'NETWORK':
|
||||||
net_id = network_helper.get_mgmt_net_id()
|
net_id = network_helper.get_mgmt_net_id()
|
||||||
return network_helper.get_net_name_from_id(net_id=net_id)
|
return network_helper.get_net_name_from_id(net_id=net_id)
|
||||||
elif param_name is 'FLAVOR':
|
elif param_name == 'FLAVOR':
|
||||||
return 'small_ded'
|
return 'small_ded'
|
||||||
elif param_name is 'IMAGE':
|
elif param_name == 'IMAGE':
|
||||||
return GuestImages.DEFAULT['guest']
|
return GuestImages.DEFAULT['guest']
|
||||||
else:
|
else:
|
||||||
return None
|
return None
|
||||||
|
@ -1850,7 +1850,7 @@ def modify_host_memory(host, proc, gib_1g=None, gib_4k_range=None,
|
|||||||
args = ''
|
args = ''
|
||||||
if not actual_mems:
|
if not actual_mems:
|
||||||
actual_mems = _get_actual_mems(host=host)
|
actual_mems = _get_actual_mems(host=host)
|
||||||
mib_avail, page_1g = actual_mems[proc]
|
mib_avail, page_1g = actual_mems[str(proc)]
|
||||||
|
|
||||||
if gib_1g is not None:
|
if gib_1g is not None:
|
||||||
page_1g = gib_1g
|
page_1g = gib_1g
|
||||||
@ -3930,7 +3930,7 @@ def get_traffic_control_rates(dev, con_ssh=None):
|
|||||||
|
|
||||||
traffic_classes[class_name] = rates
|
traffic_classes[class_name] = rates
|
||||||
|
|
||||||
LOG.info("Traffic classes for {}: ".format(dev, traffic_classes))
|
LOG.info("Traffic classes for {}: {}".format(dev, traffic_classes))
|
||||||
return traffic_classes
|
return traffic_classes
|
||||||
|
|
||||||
|
|
||||||
@ -4030,8 +4030,7 @@ def get_host_memories(host, headers=('app_hp_avail_2M',), proc_id=None,
|
|||||||
values_all_procs.append(vals)
|
values_all_procs.append(vals)
|
||||||
|
|
||||||
if rtn_dict:
|
if rtn_dict:
|
||||||
values_all_procs = {procs[i]: values_all_procs[i] for i in
|
values_all_procs = {str(procs[i]): values_all_procs[i] for i in range(len(procs))}
|
||||||
range(len(procs))}
|
|
||||||
|
|
||||||
return values_all_procs
|
return values_all_procs
|
||||||
|
|
||||||
|
@ -871,18 +871,18 @@ def add_icmp_and_tcp_rules(security_group, auth_info=Tenant.get('admin'),
|
|||||||
"""
|
"""
|
||||||
security_rules = get_security_group_rules(
|
security_rules = get_security_group_rules(
|
||||||
con_ssh=con_ssh, auth_info=auth_info, group=security_group,
|
con_ssh=con_ssh, auth_info=auth_info, group=security_group,
|
||||||
protocol='ingress', **{'IP Protocol': ('tcp', 'icmp')})
|
**{'IP Protocol': ('tcp', 'icmp')})
|
||||||
if len(security_rules) >= 2:
|
if len(security_rules) >= 3:
|
||||||
LOG.info("Security group rules for {} already exist to allow ping and "
|
LOG.info("Security group rules for {} already exist to allow ping and "
|
||||||
"ssh".format(security_group))
|
"ssh".format(security_group))
|
||||||
return
|
return
|
||||||
|
|
||||||
LOG.info("Create icmp and ssh security group rules for {} with best "
|
LOG.info("Create icmp and ssh security group rules for {} with best "
|
||||||
"effort".format(security_group))
|
"effort".format(security_group))
|
||||||
for rules in (('icmp', None), ('tcp', 22)):
|
for rules in (('icmp', None, None), ('icmp', None, True), ('tcp', 22, None)):
|
||||||
protocol, dst_port = rules
|
protocol, dst_port, egress = rules
|
||||||
create_security_group_rule(group=security_group, protocol=protocol,
|
create_security_group_rule(group=security_group, protocol=protocol,
|
||||||
dst_port=dst_port, fail_ok=True,
|
dst_port=dst_port, egress=egress, fail_ok=True,
|
||||||
auth_info=auth_info, cleanup=cleanup)
|
auth_info=auth_info, cleanup=cleanup)
|
||||||
|
|
||||||
|
|
||||||
@ -2454,7 +2454,7 @@ def get_eth_for_mac(ssh_client, mac_addr, timeout=VMTimeout.IF_ADD,
|
|||||||
end_time = time.time() + timeout
|
end_time = time.time() + timeout
|
||||||
while time.time() < end_time:
|
while time.time() < end_time:
|
||||||
if not vshell:
|
if not vshell:
|
||||||
if mac_addr in ssh_client.exec_cmd('ip addr'.format(mac_addr))[1]:
|
if mac_addr in ssh_client.exec_cmd('ip addr')[1]:
|
||||||
code, output = ssh_client.exec_cmd(
|
code, output = ssh_client.exec_cmd(
|
||||||
'ip addr | grep --color=never -B 1 "{}"'.format(mac_addr))
|
'ip addr | grep --color=never -B 1 "{}"'.format(mac_addr))
|
||||||
# sample output:
|
# sample output:
|
||||||
@ -4770,7 +4770,7 @@ def get_ip_for_eth(ssh_client, eth_name):
|
|||||||
"30.0.0.2"
|
"30.0.0.2"
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if eth_name in ssh_client.exec_cmd('ip addr'.format(eth_name))[1]:
|
if eth_name in ssh_client.exec_cmd('ip addr')[1]:
|
||||||
output = ssh_client.exec_cmd('ip addr show {}'.format(eth_name),
|
output = ssh_client.exec_cmd('ip addr show {}'.format(eth_name),
|
||||||
fail_ok=False)[1]
|
fail_ok=False)[1]
|
||||||
if re.search('inet {}'.format(Networks.IPV4_IP), output):
|
if re.search('inet {}'.format(Networks.IPV4_IP), output):
|
||||||
@ -5597,8 +5597,6 @@ def create_qos(name=None, tenant_name=None, description=None, scheduler=None,
|
|||||||
check_dict['policies'][key] = value
|
check_dict['policies'][key] = value
|
||||||
else:
|
else:
|
||||||
args += " --{} '{}'".format(key, value)
|
args += " --{} '{}'".format(key, value)
|
||||||
if key is 'tenant-id':
|
|
||||||
key = 'tenant_id'
|
|
||||||
check_dict[key] = value
|
check_dict[key] = value
|
||||||
|
|
||||||
LOG.info("Creating QoS with args: {}".format(args))
|
LOG.info("Creating QoS with args: {}".format(args))
|
||||||
@ -5609,7 +5607,7 @@ def create_qos(name=None, tenant_name=None, description=None, scheduler=None,
|
|||||||
|
|
||||||
table_ = table_parser.table(output)
|
table_ = table_parser.table(output)
|
||||||
for key, exp_value in check_dict.items():
|
for key, exp_value in check_dict.items():
|
||||||
if key is 'policies':
|
if key == 'policies':
|
||||||
actual_value = eval(
|
actual_value = eval(
|
||||||
table_parser.get_value_two_col_table(table_, key))
|
table_parser.get_value_two_col_table(table_, key))
|
||||||
else:
|
else:
|
||||||
|
@ -217,7 +217,7 @@ class LdapUserManager(object, metaclass=Singleton):
|
|||||||
(
|
(
|
||||||
'yes',
|
'yes',
|
||||||
# ("{}@{}'s password:".format(user_name, hostname_ip),),
|
# ("{}@{}'s password:".format(user_name, hostname_ip),),
|
||||||
(r".*@.*'s password: ".format(hostname_ip),),
|
(r".*@{}'s password: ".format(hostname_ip),),
|
||||||
('Failed to get password prompt',)
|
('Failed to get password prompt',)
|
||||||
),
|
),
|
||||||
(
|
(
|
||||||
|
@ -560,7 +560,7 @@ def add_storage_backend(backend='ceph', ceph_mon_gib='20', ceph_mon_dev=None,
|
|||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if backend is not 'ceph':
|
if backend != 'ceph':
|
||||||
msg = "Invalid backend {} specified. Valid choices are {}".format(
|
msg = "Invalid backend {} specified. Valid choices are {}".format(
|
||||||
backend, ['ceph'])
|
backend, ['ceph'])
|
||||||
if fail_ok:
|
if fail_ok:
|
||||||
@ -742,7 +742,7 @@ def add_ceph_mon(host, con_ssh=None, fail_ok=False):
|
|||||||
else:
|
else:
|
||||||
raise exceptions.HostError(msg)
|
raise exceptions.HostError(msg)
|
||||||
|
|
||||||
cmd = 'ceph-mon-add'.format(host)
|
cmd = 'ceph-mon-add'
|
||||||
|
|
||||||
rc, output = cli.system(cmd, host, ssh_client=con_ssh, fail_ok=fail_ok)
|
rc, output = cli.system(cmd, host, ssh_client=con_ssh, fail_ok=fail_ok)
|
||||||
if rc != 0:
|
if rc != 0:
|
||||||
|
@ -5,6 +5,7 @@ log_print = False
|
|||||||
markers =
|
markers =
|
||||||
sanity: mark test for sanity run
|
sanity: mark test for sanity run
|
||||||
robotsanity: temporary mark for the tests from robotframework
|
robotsanity: temporary mark for the tests from robotframework
|
||||||
|
robotperformance: temporary mark for the tests from robotframework
|
||||||
cpe_sanity: mark tests for cpe sanity
|
cpe_sanity: mark tests for cpe sanity
|
||||||
storage_sanity: mark tests for storage sanity
|
storage_sanity: mark tests for storage sanity
|
||||||
sx_sanity: mark tests for simplex sanity
|
sx_sanity: mark tests for simplex sanity
|
||||||
|
142
automated-pytest-suite/testcases/performance/conftest.py
Executable file
142
automated-pytest-suite/testcases/performance/conftest.py
Executable file
@ -0,0 +1,142 @@
|
|||||||
|
# Do NOT remove following imports. Needed for test fixture discovery purpose
|
||||||
|
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
from pytest import fixture
|
||||||
|
|
||||||
|
from testfixtures.resource_mgmt import delete_resources_func, delete_resources_class, \
|
||||||
|
delete_resources_module, delete_resources_session
|
||||||
|
from testfixtures.recover_hosts import hosts_recover_func, hosts_recover_class
|
||||||
|
from testfixtures.recover_hosts import hosts_recover_module
|
||||||
|
from testfixtures.recover_hosts import HostsToRecover
|
||||||
|
from consts.stx import AppStatus, GuestImages, FlavorSpec
|
||||||
|
from keywords import container_helper, glance_helper, host_helper
|
||||||
|
from keywords import network_helper, nova_helper, vm_helper, system_helper
|
||||||
|
from utils import cli, table_parser
|
||||||
|
from utils.tis_log import LOG
|
||||||
|
from consts.stx import FlavorSpec, GuestImages
|
||||||
|
from testfixtures.verify_fixtures import *
|
||||||
|
from testfixtures.pre_checks_and_configs import *
|
||||||
|
|
||||||
|
CIRROS_PARAMS = {
|
||||||
|
"flavor_name": "f1.small",
|
||||||
|
"flavor_vcpus": 1,
|
||||||
|
"flavor_ram": 2048,
|
||||||
|
"flavor_disk": 60,
|
||||||
|
"properties": {FlavorSpec.MEM_PAGE_SIZE: 'large'},
|
||||||
|
"image_name": "cirros",
|
||||||
|
"image_file": os.path.join(GuestImages.DEFAULT["image_dir"], "cirros-0.4.0-x86_64-disk.img"),
|
||||||
|
"disk_format": "qcow2"
|
||||||
|
}
|
||||||
|
DICT_PARAMS = [CIRROS_PARAMS]
|
||||||
|
|
||||||
|
|
||||||
|
@fixture(params=DICT_PARAMS, scope="module")
|
||||||
|
def create_flavors_and_images(request):
|
||||||
|
# TODO need to check with add_default_specs set to True on baremetal
|
||||||
|
LOG.fixture_step("Creating flavor and image")
|
||||||
|
fl_id = nova_helper.create_flavor(name=request.param['flavor_name'],
|
||||||
|
vcpus=request.param['flavor_vcpus'],
|
||||||
|
ram=request.param['flavor_ram'],
|
||||||
|
root_disk=request.param['flavor_disk'],
|
||||||
|
properties=request.param['properties'], is_public=True,
|
||||||
|
add_default_specs=False, cleanup="module")[1]
|
||||||
|
LOG.error(request.param['image_file'])
|
||||||
|
im_id = glance_helper.create_image(name=request.param['image_name'],
|
||||||
|
source_image_file=request.param['image_file'],
|
||||||
|
disk_format=request.param['disk_format'],
|
||||||
|
cleanup="module")[1]
|
||||||
|
return {
|
||||||
|
"flavor": fl_id,
|
||||||
|
"image": im_id
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@fixture(scope="module")
|
||||||
|
def create_network_performance():
|
||||||
|
"""
|
||||||
|
Create network and subnetwork used in sanity_openstack tests
|
||||||
|
"""
|
||||||
|
LOG.fixture_step("Creating net and subnet")
|
||||||
|
net_id = network_helper.create_network(name="network-1", cleanup="module")[1]
|
||||||
|
subnet_id = network_helper.create_subnet(name="subnet", network="network-1",
|
||||||
|
subnet_range="192.168.0.0/24", dhcp=True,
|
||||||
|
ip_version=4, cleanup="module")[1]
|
||||||
|
return net_id, subnet_id
|
||||||
|
|
||||||
|
|
||||||
|
# this should be modified to call boot_vm_openstack when implemented
|
||||||
|
@fixture(scope="module")
|
||||||
|
def launch_instances(create_flavors_and_images, create_network_performance):
|
||||||
|
LOG.fixture_step("Creating instances")
|
||||||
|
net_id_list = list()
|
||||||
|
net_id_list.append({"net-id": create_network_performance[0]})
|
||||||
|
host = host_helper.get_hypervisors()[0]
|
||||||
|
vm_id = vm_helper.boot_vm(flavor=create_flavors_and_images["flavor"],
|
||||||
|
nics=net_id_list, source="image",
|
||||||
|
source_id=create_flavors_and_images["image"],
|
||||||
|
vm_host=host, cleanup="module")[1]
|
||||||
|
# TODO check power state RUNNING?
|
||||||
|
return vm_id
|
||||||
|
|
||||||
|
# TODO maybe teardown to revert values to older versions
|
||||||
|
@fixture(scope="module")
|
||||||
|
def ovs_dpdk_1_core():
|
||||||
|
LOG.fixture_step("Review the ovs-dpdk vswitch be in just 1 core")
|
||||||
|
vswitch_type = "ovs-dpdk"
|
||||||
|
cpu_function = "vswitch"
|
||||||
|
proc = "0"
|
||||||
|
host_list = host_helper.get_hypervisors()
|
||||||
|
for host in host_list:
|
||||||
|
with host_helper.ssh_to_host(host) as node_ssh:
|
||||||
|
cmd = "cat /proc/meminfo | grep Hugepagesize | awk '{print $2}'"
|
||||||
|
hp = int(node_ssh.exec_cmd(cmd=cmd, fail_ok=False, get_exit_code=False)[1])
|
||||||
|
mem = host_helper.get_host_memories(host=host,
|
||||||
|
headers=("app_hp_avail_2M",
|
||||||
|
"app_hp_avail_1G",
|
||||||
|
"mem_avail(MiB)",
|
||||||
|
"vs_hp_total"))
|
||||||
|
if hp == 1048576:
|
||||||
|
if int(mem[proc][3]) < 2 or mem[proc][1] < 10:
|
||||||
|
HostsToRecover.add(hostnames=host, scope="module")
|
||||||
|
host_helper.lock_host(host=host)
|
||||||
|
if int(mem[proc][3]) < 2:
|
||||||
|
args = ' -f vswitch -1G {} {} {}'.format(2, host, proc)
|
||||||
|
cli.system('host-memory-modify', args)
|
||||||
|
host_helper.modify_host_cpu(host=host, cpu_function=cpu_function,
|
||||||
|
**{"p{}".format(proc): 1})
|
||||||
|
# TODO maybe find a better option than sleep since we can't wait for applyying
|
||||||
|
# container_helper.wait_for_apps_status(apps='stx-openstack',
|
||||||
|
# status=AppStatus.APPLYING)
|
||||||
|
time.sleep(60)
|
||||||
|
container_helper.wait_for_apps_status(apps='stx-openstack',
|
||||||
|
status=AppStatus.APPLIED,
|
||||||
|
check_interval=30)
|
||||||
|
if mem[proc][1] < 10:
|
||||||
|
args = ' -1G {} {} {}'.format(10, host, proc)
|
||||||
|
cli.system('host-memory-modify', args)
|
||||||
|
host_helper.unlock_host(host=host)
|
||||||
|
if hp == 2048:
|
||||||
|
if int(mem[proc][3]) < 512 or mem[proc][0] < 2500:
|
||||||
|
host_helper.lock_host(host=host)
|
||||||
|
if int(mem[proc][3]) < 512:
|
||||||
|
system_helper.modify_system(**{"vswitch_type": vswitch_type})
|
||||||
|
vswitch_args = ' -f vswitch -2M {} {} {}'.format(512, host, proc)
|
||||||
|
cli.system('host-memory-modify', vswitch_args)
|
||||||
|
host_helper.modify_host_cpu(host=host, cpu_function=cpu_function,
|
||||||
|
**{"p{}".format(proc): 1})
|
||||||
|
# TODO maybe find a better option than sleep since we can't wait for applyying
|
||||||
|
# container_helper.wait_for_apps_status(apps='stx-openstack',
|
||||||
|
# status=AppStatus.APPLIED)
|
||||||
|
time.sleep(60)
|
||||||
|
container_helper.wait_for_apps_status(apps='stx-openstack',
|
||||||
|
status=AppStatus.APPLIED,
|
||||||
|
check_interval=30)
|
||||||
|
if mem[proc][0] < 2500:
|
||||||
|
args = ' -2M {} {} {}'.format(2500, host, proc)
|
||||||
|
cli.system('host-memory-modify', args)
|
||||||
|
host_helper.unlock_host(host=host)
|
||||||
|
|
||||||
|
test_table = host_helper.get_host_cpu_list_table(host=host)
|
||||||
|
curr_assigned_function_list = table_parser.get_values(test_table, "assigned_function")
|
||||||
|
assert "vSwitch" in curr_assigned_function_list
|
79
automated-pytest-suite/testcases/performance/test_detect_failed_compute.py
Executable file
79
automated-pytest-suite/testcases/performance/test_detect_failed_compute.py
Executable file
@ -0,0 +1,79 @@
|
|||||||
|
###
|
||||||
|
#
|
||||||
|
# Copyright (c) 2020 Intel Corporation
|
||||||
|
#
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
# Performance test to check the time until detection of failed compute
|
||||||
|
###
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
|
||||||
|
from pytest import mark
|
||||||
|
|
||||||
|
from consts.timeout import HostTimeout
|
||||||
|
from consts.stx import HostAdminState, HostOperState, HostAvailState
|
||||||
|
from keywords import system_helper, host_helper, kube_helper
|
||||||
|
from utils.clients import ssh
|
||||||
|
from utils.tis_log import LOG
|
||||||
|
|
||||||
|
|
||||||
|
@mark.robotperformance
|
||||||
|
def test_detect_failed_compute(no_simplex, no_duplex):
|
||||||
|
con_ssh = ssh.ControllerClient.get_active_controller()
|
||||||
|
active_controller = system_helper.get_active_controller_name()
|
||||||
|
compute_host = system_helper.get_computes(administrative=HostAdminState.UNLOCKED,
|
||||||
|
operational=HostOperState.ENABLED,
|
||||||
|
availability=HostAvailState.AVAILABLE)[0]
|
||||||
|
|
||||||
|
compute_su_prompt = r'.*compute\-([0-9]){1,}\:/home/sysadmin#'
|
||||||
|
cmd_get_offset = ("ntpq -p | grep {} -A1 | "
|
||||||
|
"tail -1 | awk '{{print$8}}'".format(active_controller))
|
||||||
|
cmd_magic_keys_enable = ("echo 1 > /proc/sys/kernel/sysrq")
|
||||||
|
cmd_get_start_date = ("python -c \"import datetime; "
|
||||||
|
"print str(datetime.datetime.now())[:-3]\"")
|
||||||
|
cmd_get_end_date = ("cat /var/log/mtcAgent.log | "
|
||||||
|
"grep \"{} MNFA new candidate\" | "
|
||||||
|
"tail -1 | awk '{{print$1}}'".format(compute_host))
|
||||||
|
cmd_trigger_reboot = ("echo b > /proc/sysrq-trigger")
|
||||||
|
|
||||||
|
res = list()
|
||||||
|
|
||||||
|
for i in range(20):
|
||||||
|
LOG.tc_step("Start of iter {}".format(i))
|
||||||
|
st = str()
|
||||||
|
offset = float()
|
||||||
|
with host_helper.ssh_to_host(compute_host) as node_ssh:
|
||||||
|
offset = float(node_ssh.exec_cmd(cmd=cmd_get_offset, get_exit_code=False)[1])/1000
|
||||||
|
node_ssh.send_sudo(cmd="su")
|
||||||
|
node_ssh.expect(compute_su_prompt)
|
||||||
|
node_ssh.send_sudo(cmd=cmd_magic_keys_enable)
|
||||||
|
node_ssh.expect(compute_su_prompt)
|
||||||
|
st = node_ssh.exec_cmd(cmd=cmd_get_start_date, get_exit_code=False,
|
||||||
|
blob=compute_su_prompt)[1]
|
||||||
|
node_ssh.exec_sudo_cmd(cmd_trigger_reboot, get_exit_code=False)
|
||||||
|
|
||||||
|
system_helper.wait_for_hosts_states(compute_host, check_interval=20,
|
||||||
|
availability=HostAvailState.AVAILABLE)
|
||||||
|
pods_health = kube_helper.wait_for_pods_healthy(check_interval=20,
|
||||||
|
timeout=HostTimeout.REBOOT)
|
||||||
|
assert pods_health is True, "Check PODs health has failed"
|
||||||
|
|
||||||
|
st_date = datetime.datetime.fromtimestamp(
|
||||||
|
datetime.datetime.strptime(st, '%Y-%m-%d %H:%M:%S.%f').timestamp() - offset)
|
||||||
|
et = con_ssh.exec_cmd(cmd=cmd_get_end_date, get_exit_code=False)[1]
|
||||||
|
et_date = datetime.datetime.strptime(et, '%Y-%m-%dT%H:%M:%S.%f')
|
||||||
|
diff = et_date - st_date
|
||||||
|
LOG.info("\noffset = {}\nstart time = {}\nend time = {}".format(offset, st, et))
|
||||||
|
LOG.info("\ndiff = {}".format(diff))
|
||||||
|
res.append(diff)
|
||||||
|
|
||||||
|
def calc_avg(lst):
|
||||||
|
rtrn_sum = datetime.timedelta()
|
||||||
|
for i in lst:
|
||||||
|
LOG.info("Iter {}: {}".format(lst.index(i), i))
|
||||||
|
rtrn_sum += i
|
||||||
|
return rtrn_sum/len(lst)
|
||||||
|
|
||||||
|
final_res = calc_avg(res)
|
||||||
|
LOG.info("Avg time is : {}".format(final_res))
|
@ -0,0 +1,90 @@
|
|||||||
|
###
|
||||||
|
#
|
||||||
|
# Copyright (c) 2020 Intel Corporation
|
||||||
|
#
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
# Performance test to check the time untill detection of failed controller
|
||||||
|
###
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
|
||||||
|
from pytest import mark
|
||||||
|
|
||||||
|
from consts.timeout import HostTimeout
|
||||||
|
from consts.stx import HostAvailState
|
||||||
|
from keywords import system_helper, host_helper, kube_helper
|
||||||
|
from utils.clients import ssh
|
||||||
|
from utils.tis_log import LOG
|
||||||
|
|
||||||
|
|
||||||
|
@mark.robotperformance
|
||||||
|
def test_detect_failed_controller(no_simplex):
|
||||||
|
con_ssh = ssh.ControllerClient.get_active_controller()
|
||||||
|
active_controller, controller_host = system_helper.get_active_standby_controllers()
|
||||||
|
|
||||||
|
controller_su_prompt = r'.*controller\-([0-9]){1,}\:/home/sysadmin#'
|
||||||
|
cmd_get_offset = ("ntpq -p | grep {} -A1 | "
|
||||||
|
"tail -1 | awk '{{print$8}}'".format(active_controller))
|
||||||
|
cmd_magic_keys_enable = ("echo 1 > /proc/sys/kernel/sysrq")
|
||||||
|
cmd_get_start_date = ("python -c \"import datetime; "
|
||||||
|
"print str(datetime.datetime.now())[:-3]\"")
|
||||||
|
cmd_get_end_date = ("cat /var/log/mtcAgent.log | "
|
||||||
|
"grep --color=never \"{} MNFA new candidate\" | "
|
||||||
|
"tail -1 | awk '{{print$1}}'".format(controller_host))
|
||||||
|
cmd_get_recovered_date = ("cat /var/log/mtcAgent.log | "
|
||||||
|
"grep --color=never '{} unlocked-enabled-available' | "
|
||||||
|
"tail -1 | awk '{{print$1}}'".format(controller_host))
|
||||||
|
cmd_trigger_reboot = ("echo b > /proc/sysrq-trigger")
|
||||||
|
|
||||||
|
res = list()
|
||||||
|
rec_res = list()
|
||||||
|
|
||||||
|
for i in range(20):
|
||||||
|
LOG.tc_step("Start of iter {}".format(i))
|
||||||
|
st = str()
|
||||||
|
offset = float()
|
||||||
|
with host_helper.ssh_to_host(controller_host) as node_ssh:
|
||||||
|
offset = float(node_ssh.exec_cmd(cmd=cmd_get_offset, get_exit_code=False)[1])/1000
|
||||||
|
node_ssh.send_sudo(cmd="su")
|
||||||
|
node_ssh.expect(controller_su_prompt)
|
||||||
|
node_ssh.send_sudo(cmd=cmd_magic_keys_enable)
|
||||||
|
node_ssh.expect(controller_su_prompt)
|
||||||
|
st = node_ssh.exec_cmd(cmd=cmd_get_start_date, get_exit_code=False,
|
||||||
|
blob=controller_su_prompt)[1]
|
||||||
|
node_ssh.exec_sudo_cmd(cmd_trigger_reboot, get_exit_code=False)
|
||||||
|
|
||||||
|
system_helper.wait_for_hosts_states(controller_host, check_interval=20,
|
||||||
|
availability=HostAvailState.AVAILABLE)
|
||||||
|
pods_health = kube_helper.wait_for_pods_healthy(check_interval=20,
|
||||||
|
timeout=HostTimeout.REBOOT)
|
||||||
|
assert pods_health is True, "Check PODs health has failed"
|
||||||
|
|
||||||
|
st_date = datetime.datetime.fromtimestamp(
|
||||||
|
datetime.datetime.strptime(st, '%Y-%m-%d %H:%M:%S.%f').timestamp() - offset)
|
||||||
|
et = con_ssh.exec_cmd(cmd=cmd_get_end_date, get_exit_code=False)[1]
|
||||||
|
et_date = datetime.datetime.strptime(et, '%Y-%m-%dT%H:%M:%S.%f')
|
||||||
|
er = con_ssh.exec_cmd(cmd=cmd_get_recovered_date, get_exit_code=False)[1]
|
||||||
|
er_date = datetime.datetime.strptime(er, '%Y-%m-%dT%H:%M:%S.%f')
|
||||||
|
diff = et_date - st_date
|
||||||
|
rec_diff = er_date - st_date
|
||||||
|
LOG.info(("\noffset = {}\n"
|
||||||
|
"start time = {}\n"
|
||||||
|
"end time = {}\n"
|
||||||
|
"recover time = {}".format(offset, st, et, er)))
|
||||||
|
LOG.info("\ndiff = {}".format(diff))
|
||||||
|
LOG.info("\nrecover diff = {}".format(rec_diff))
|
||||||
|
res.append(diff)
|
||||||
|
rec_res.append(rec_diff)
|
||||||
|
|
||||||
|
def calc_avg(lst):
|
||||||
|
rtrn_sum = datetime.timedelta()
|
||||||
|
for i in lst:
|
||||||
|
LOG.info("Iter {}: {}".format(lst.index(i), i))
|
||||||
|
rtrn_sum += i
|
||||||
|
return rtrn_sum/len(lst)
|
||||||
|
|
||||||
|
final_res = calc_avg(res)
|
||||||
|
final_rec_res = calc_avg(rec_res)
|
||||||
|
LOG.info("Avg time is : {}".format(final_res))
|
||||||
|
LOG.info("Avg rec time is : {}".format(final_rec_res))
|
98
automated-pytest-suite/testcases/performance/test_detection_failed_vm.py
Executable file
98
automated-pytest-suite/testcases/performance/test_detection_failed_vm.py
Executable file
@ -0,0 +1,98 @@
|
|||||||
|
###
|
||||||
|
#
|
||||||
|
# Copyright (c) 2020 Intel Corporation
|
||||||
|
#
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
# Performance test to check the time untill detection of failed VM
|
||||||
|
###
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
import re
|
||||||
|
import time
|
||||||
|
from pytest import mark
|
||||||
|
|
||||||
|
from consts.auth import HostLinuxUser
|
||||||
|
from keywords import host_helper, vm_helper
|
||||||
|
from utils import exceptions
|
||||||
|
from utils.clients import ssh
|
||||||
|
from utils.tis_log import LOG
|
||||||
|
|
||||||
|
|
||||||
|
@mark.robotperformance
|
||||||
|
def test_detection_of_failed_instance(launch_instances):
|
||||||
|
con_ssh = ssh.ControllerClient.get_active_controller()
|
||||||
|
start_date_cmd = ("python -c \"import datetime; "
|
||||||
|
"print str(datetime.datetime.now())[:-3]\"")
|
||||||
|
kill_cmd = (start_date_cmd + "&& sudo pkill -SIGKILL qemu")
|
||||||
|
vm_host = vm_helper.get_vm_host(launch_instances)
|
||||||
|
vm_name = vm_helper.get_vm_name_from_id(launch_instances)
|
||||||
|
end_date_cmd = ("grep -r \"{}\" /var/log/nfv-vim.log | "
|
||||||
|
"grep \"powering-off\" | "
|
||||||
|
"tail -1 | "
|
||||||
|
"awk '{{print$1}}'".format(vm_name))
|
||||||
|
|
||||||
|
res = list()
|
||||||
|
|
||||||
|
for i in range(20):
|
||||||
|
LOG.tc_step("Start of iter {}".format(i))
|
||||||
|
try:
|
||||||
|
st = str()
|
||||||
|
et = str()
|
||||||
|
|
||||||
|
vm_helper.get_vms()
|
||||||
|
|
||||||
|
with host_helper.ssh_to_host(vm_host, con_ssh=con_ssh) as con_0_ssh:
|
||||||
|
end_time = time.time() + 120
|
||||||
|
while time.time() < end_time:
|
||||||
|
con_0_ssh.send(cmd="pgrep qemu")
|
||||||
|
con_0_ssh.expect()
|
||||||
|
matches = re.findall("\n([0-9]+)\n", con_0_ssh.cmd_output)
|
||||||
|
time.sleep(5)
|
||||||
|
if matches:
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
raise exceptions.TimeoutException("Timed out waiting for qemu process")
|
||||||
|
|
||||||
|
con_0_ssh.send(cmd=kill_cmd)
|
||||||
|
index = con_0_ssh.expect(["Password:", con_0_ssh.prompt])
|
||||||
|
st = con_0_ssh.cmd_output.splitlines()[1]
|
||||||
|
if index == 0:
|
||||||
|
con_0_ssh.send(HostLinuxUser.get_password())
|
||||||
|
con_0_ssh.expect()
|
||||||
|
|
||||||
|
st_date = datetime.datetime.strptime(st, '%Y-%m-%d %H:%M:%S.%f')
|
||||||
|
et_date = None
|
||||||
|
|
||||||
|
end_time = time.time() + 120
|
||||||
|
while time.time() < end_time:
|
||||||
|
et = con_ssh.exec_cmd(cmd=end_date_cmd)[1]
|
||||||
|
try:
|
||||||
|
et_date = datetime.datetime.strptime(et, '%Y-%m-%dT%H:%M:%S.%f')
|
||||||
|
if et_date < st_date:
|
||||||
|
time.sleep(5)
|
||||||
|
continue
|
||||||
|
except ValueError:
|
||||||
|
time.sleep(5)
|
||||||
|
continue
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
raise exceptions.TimeoutException("Timed out waiting for end time")
|
||||||
|
|
||||||
|
diff = et_date - st_date
|
||||||
|
LOG.info("\nstart time = {}\nend time = {}".format(st, et))
|
||||||
|
LOG.info("\ndiff = {}".format(diff))
|
||||||
|
res.append(diff)
|
||||||
|
finally:
|
||||||
|
time.sleep(5)
|
||||||
|
vm_helper.start_vms(launch_instances)
|
||||||
|
|
||||||
|
def calc_avg(lst):
|
||||||
|
rtrn_sum = datetime.timedelta()
|
||||||
|
for i in lst:
|
||||||
|
LOG.info("Iter {}: {}".format(lst.index(i), i))
|
||||||
|
rtrn_sum += i
|
||||||
|
return rtrn_sum/len(lst)
|
||||||
|
|
||||||
|
final_res = calc_avg(res)
|
||||||
|
LOG.info("Avg time is : {}".format(final_res))
|
@ -0,0 +1,79 @@
|
|||||||
|
###
|
||||||
|
#
|
||||||
|
# Copyright (c) 2020 Intel Corporation
|
||||||
|
#
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
# Performance test dpdk live migrate latency
|
||||||
|
###
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
from pytest import mark
|
||||||
|
|
||||||
|
from keywords import vm_helper
|
||||||
|
from utils import cli
|
||||||
|
from utils.clients import ssh
|
||||||
|
from utils.tis_log import LOG
|
||||||
|
|
||||||
|
|
||||||
|
@mark.robotperformance
|
||||||
|
def test_dpdk_live_migrate_latency(ovs_dpdk_1_core, launch_instances, no_simplex, no_duplex):
|
||||||
|
con_ssh = ssh.ControllerClient.get_active_controller()
|
||||||
|
prev_st = None
|
||||||
|
prev_et = None
|
||||||
|
res = list()
|
||||||
|
|
||||||
|
for i in range(20):
|
||||||
|
LOG.tc_step("Start of iter {}".format(i))
|
||||||
|
vm_host = vm_helper.get_vm_host(launch_instances)
|
||||||
|
cmd_get_pod_name = ("kubectl get pods -n openstack | "
|
||||||
|
"grep --color=never nova-compute-{} | "
|
||||||
|
"awk '{{print$1}}'".format(vm_host))
|
||||||
|
pod_name = con_ssh.exec_cmd(cmd=cmd_get_pod_name)[1].rstrip().lstrip()
|
||||||
|
cmd_get_start_date = ("kubectl -n openstack logs {} -c nova-compute | "
|
||||||
|
"grep --color=never 'instance: {}' | "
|
||||||
|
"grep --color=never 'pre_live_migration on destination host' | "
|
||||||
|
"tail -1 | "
|
||||||
|
"awk '{{print $1 \" \" $2}}'".format(pod_name, launch_instances))
|
||||||
|
cmd_get_end_date = ("kubectl -n openstack logs {} -c nova-compute | "
|
||||||
|
"grep --color=never 'instance: {}' | "
|
||||||
|
"egrep --color=never "
|
||||||
|
"'Migrating instance to [a-zA-Z]+-[0-9] finished successfully' | "
|
||||||
|
"tail -1 | "
|
||||||
|
"awk '{{print $1 \" \" $2}}'".format(pod_name, launch_instances))
|
||||||
|
|
||||||
|
vm_helper.live_migrate_vm(vm_id=launch_instances)
|
||||||
|
|
||||||
|
st = con_ssh.exec_cmd(cmd=cmd_get_start_date)[1]
|
||||||
|
et = con_ssh.exec_cmd(cmd=cmd_get_end_date)[1]
|
||||||
|
st_date = datetime.datetime.strptime(st, '%Y-%m-%d %H:%M:%S.%f')
|
||||||
|
et_date = datetime.datetime.strptime(et, '%Y-%m-%d %H:%M:%S.%f')
|
||||||
|
if i == 0:
|
||||||
|
prev_st = st_date
|
||||||
|
prev_et = et_date
|
||||||
|
elif i > 0:
|
||||||
|
if st_date <= prev_st or et_date <= prev_et:
|
||||||
|
msg = ("new start time {} is less "
|
||||||
|
"or equal than old start time {}\n"
|
||||||
|
"or new end time {} is less "
|
||||||
|
"or equal than old end time "
|
||||||
|
"{}".format(st_date, prev_st, et_date, prev_et))
|
||||||
|
LOG.error(msg)
|
||||||
|
raise Exception(msg)
|
||||||
|
else:
|
||||||
|
prev_st = st_date
|
||||||
|
prev_et = et_date
|
||||||
|
diff = et_date - st_date
|
||||||
|
LOG.info("\nstart time = {}\nend time = {}".format(st, et))
|
||||||
|
LOG.info("\ndiff = {}".format(diff))
|
||||||
|
res.append(diff)
|
||||||
|
|
||||||
|
def calc_avg(lst):
|
||||||
|
rtrn_sum = datetime.timedelta()
|
||||||
|
for i in lst:
|
||||||
|
LOG.info("Iter {}: {}".format(lst.index(i), i))
|
||||||
|
rtrn_sum += i
|
||||||
|
return rtrn_sum/len(lst)
|
||||||
|
|
||||||
|
final_res = calc_avg(res)
|
||||||
|
LOG.info("Avg time is : {}".format(final_res))
|
@ -0,0 +1,42 @@
|
|||||||
|
###
|
||||||
|
#
|
||||||
|
# Copyright (c) 2020 Intel Corporation
|
||||||
|
#
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
# Measure OS latency. The term latency, when used in the context of the RT Kernel,
|
||||||
|
# is the time interval between the occurrence of an event and the time when that event is "handled"
|
||||||
|
#
|
||||||
|
###
|
||||||
|
|
||||||
|
from pytest import mark
|
||||||
|
|
||||||
|
from keywords import host_helper
|
||||||
|
from utils.tis_log import LOG
|
||||||
|
|
||||||
|
|
||||||
|
@mark.robotperformance
@mark.skip(reason="In Progress")
def test_host_guest_latency():
    """Measure OS (RT kernel) latency on compute-0 with cyclictest.

    Runs cyclictest via sudo on compute-0 (3 measurement threads pinned to
    CPUs 1-2, priority 95, 1000 loops), then parses the "Min/Avg/Max
    Latencies:" summary lines from its output and logs them as a table.
    """
    with host_helper.ssh_to_host("compute-0") as node_ssh:
        # Long-duration variant kept for reference:
        # cmd = "cyclictest -m -n -p 95 -D 12h -h 20 -a 3-5 -t 3"
        cmd = "cyclictest -m -n -p 95 -l 1000 -h 20 -a 1-2 -t 3"
        res = node_ssh.exec_sudo_cmd(cmd=cmd, fail_ok=False, expect_timeout=15)[1]
        LOG.info("res = {}".format(res))

        # Default every cell to "N/A" so a missing summary line in the
        # cyclictest output cannot raise NameError when building the
        # log table below (previously these were only bound inside the
        # matching branches).
        t1_min = t2_min = t3_min = "N/A"
        t1_avg = t2_avg = t3_avg = "N/A"
        t1_max = t2_max = t3_max = "N/A"
        for line in res.splitlines():
            if "Min Latencies:" in line:
                min_lat = line.split(":")[1]
                t1_min, t2_min, t3_min = min_lat.split()
            elif "Avg Latencies:" in line:
                avg_lat = line.split(":")[1]
                t1_avg, t2_avg, t3_avg = avg_lat.split()
            elif "Max Latencies:" in line:
                max_lat = line.split(":")[1]
                t1_max, t2_max, t3_max = max_lat.split()
        LOG.info("\n"
                 "\tT1\tT2\tT3\n"
                 "min:\t{}\t{}\t{}\n"
                 "avg:\t{}\t{}\t{}\n"
                 "max:\t{}\t{}\t{}\n".format(t1_min, t2_min, t3_min,
                                             t1_avg, t2_avg, t3_avg,
                                             t1_max, t2_max, t3_max))
|
80
automated-pytest-suite/testcases/performance/test_measure_swact_recover.py
Executable file
80
automated-pytest-suite/testcases/performance/test_measure_swact_recover.py
Executable file
@ -0,0 +1,80 @@
|
|||||||
|
###
|
||||||
|
#
|
||||||
|
# Copyright (c) 2020 Intel Corporation
|
||||||
|
#
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
# Performance test to check measure how fast the standby controller
|
||||||
|
# becomes active and keeps the whole system running as usual
|
||||||
|
###
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
|
||||||
|
from pytest import mark
|
||||||
|
|
||||||
|
from keywords import host_helper, system_helper, kube_helper
|
||||||
|
from utils.clients import ssh
|
||||||
|
from utils.tis_log import LOG
|
||||||
|
|
||||||
|
|
||||||
|
@mark.robotperformance
def test_measure_swact_recover(no_simplex):
    """Measure how fast the standby controller becomes active after a swact.

    For two iterations: swact away from the current active controller, wait
    for both nodes to be Ready, then compute the swact duration from the
    mtcAgent.log "Action=swact" and "Task: Swact: Complete" timestamps.
    The end timestamp (read on the new active controller) is corrected by
    the NTP offset between the two controllers before subtracting.
    Always swacts back to controller-0 on exit.
    """
    # NOTE: the original also pre-assigned cmd_get_start_date to a
    # 'python -c "print datetime..."' one-liner here; it was dead code,
    # unconditionally overwritten inside the loop, so it was removed.
    res = list()

    try:
        for i in range(2):
            LOG.tc_step("Start of iter {}".format(i))
            con_ssh = ssh.ControllerClient.get_active_controller()

            LOG.info("Get active/standby controllers")
            active_controller, standby_controller = system_helper.get_active_standby_controllers()

            # NTP offset (in ms) of the soon-to-be-standby controller,
            # used to align timestamps taken on two different hosts.
            cmd_get_offset = ("ntpq -p | grep {} -A1 | "
                              "tail -1 | awk '{{print$8}}'".format(active_controller))
            cmd_get_start_date = ("cat /var/log/mtcAgent.log | "
                                  "grep \"{} Action=swact\" | "
                                  "tail -1 | awk '{{print$1}}'".format(active_controller))
            cmd_get_end_date = ("cat /var/log/mtcAgent.log | "
                                "grep \"{} Task: Swact: Complete\" | "
                                "tail -1 | awk '{{print$1}}'".format(active_controller))

            LOG.info("Start swact action")
            host_helper.swact_host(hostname=active_controller)
            kube_helper.wait_for_nodes_ready(
                hosts=(active_controller, standby_controller),
                check_interval=20)

            LOG.info("Calculate swact time")
            con_ssh = ssh.ControllerClient.get_active_controller()
            with host_helper.ssh_to_host(active_controller, con_ssh=con_ssh) as con_0_ssh:
                con_0_ssh.exec_cmd(cmd="cat /var/log/mtcAgent.log", get_exit_code=False)
                st = con_0_ssh.exec_cmd(cmd=cmd_get_start_date, get_exit_code=False)[1]
            st_date = datetime.datetime.strptime(st, '%Y-%m-%dT%H:%M:%S.%f')
            # Offset is reported in milliseconds; convert to seconds.
            offset = float(con_ssh.exec_cmd(cmd=cmd_get_offset, get_exit_code=False)[1])/1000
            et = con_ssh.exec_cmd(cmd=cmd_get_end_date, get_exit_code=False)[1]
            et_date = datetime.datetime.fromtimestamp(
                datetime.datetime.strptime(et, '%Y-%m-%dT%H:%M:%S.%f').timestamp() - offset)
            diff = et_date - st_date
            LOG.info("\nstart time = {}\nend time = {}".format(st, et))
            LOG.info("\ndiff = {}".format(diff))
            res.append(diff)
    finally:
        # Leave the system with controller-0 active regardless of outcome.
        active_controller, standby_controller = system_helper.get_active_standby_controllers()
        if active_controller != "controller-0":
            host_helper.swact_host(hostname=active_controller)
            kube_helper.wait_for_nodes_ready(
                hosts=(active_controller, standby_controller),
                check_interval=20)

    def calc_avg(lst):
        # Average a list of timedeltas (assumes lst is non-empty).
        rtrn_sum = datetime.timedelta()
        for i in lst:
            LOG.info("Iter {}: {}".format(lst.index(i), i))
            rtrn_sum += i
        return rtrn_sum/len(lst)

    final_res = calc_avg(res)
    LOG.info("Avg time is : {}".format(final_res))
|
@ -0,0 +1,132 @@
|
|||||||
|
###
|
||||||
|
#
|
||||||
|
# Copyright (c) 2020 Intel Corporation
|
||||||
|
#
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
# Performance test to measure the passthrough value of moving
|
||||||
|
# a file of 1 GB size from one instance to another with
|
||||||
|
# open source virtual switch with DPDK running in 1 core.
|
||||||
|
#
|
||||||
|
###
|
||||||
|
|
||||||
|
from pytest import mark, fixture
|
||||||
|
|
||||||
|
from keywords import host_helper, keystone_helper, network_helper, vm_helper
|
||||||
|
from utils.tis_log import LOG
|
||||||
|
|
||||||
|
FILE_SIZE = 10 # 10G
|
||||||
|
IMAGE_USER = 'cirros'
|
||||||
|
IMAGE_PASS = 'gocubsgo'
|
||||||
|
|
||||||
|
|
||||||
|
# TODO move to utils?
def get_host_and_ns(netid, host_list):
    """Locate the hypervisor hosting the network namespace for *netid*.

    Checks each host in *host_list* for an ``ip netns`` entry matching
    *netid* and returns a ``(host, namespace)`` tuple for the first match,
    or ``(None, None)`` when no host carries a matching namespace.
    """
    for candidate in host_list:
        with host_helper.ssh_to_host(candidate) as remote:
            output = remote.exec_cmd(
                cmd='ip netns | grep --color=never {}'.format(netid))[1]
        if not output:
            continue
        ns_name = output.split()[0]
        if netid in ns_name:
            return (candidate, ns_name)
    return (None, None)
|
||||||
|
|
||||||
|
@fixture(scope="module")
def create_instances(create_flavors_and_images, create_network_performance):
    """Boot two image-based VMs on the same hypervisor (module scope).

    Returns a dict with the two VM ids and their management IPs:
    ``{"vm_id_1", "vm_id_2", "vm_ip_1", "vm_ip_2"}``.
    """
    LOG.fixture_step("Creating instances")
    nics = [{"net-id": create_network_performance[0]}]
    target_host = host_helper.get_hypervisors()[1]

    def _boot_one():
        # Boot a single VM from image on the chosen hypervisor;
        # cleaned up automatically at module teardown.
        return vm_helper.boot_vm(flavor=create_flavors_and_images["flavor"],
                                 nics=nics, source="image",
                                 source_id=create_flavors_and_images["image"],
                                 vm_host=target_host, cleanup="module")[1]

    def _ip_of(vm_id):
        # 'addresses' field looks like "<network>=<ip>"; keep the ip part.
        return vm_helper.get_vm_values(vm_id=vm_id,
                                       fields='addresses')[0].split("=")[1]

    first_vm = _boot_one()
    second_vm = _boot_one()
    return {"vm_id_1": first_vm,
            "vm_id_2": second_vm,
            "vm_ip_1": _ip_of(first_vm),
            "vm_ip_2": _ip_of(second_vm)}
|
||||||
|
|
||||||
|
|
||||||
|
@mark.robotperformance
def test_vswitch_line_rate_1core(ovs_dpdk_1_core, create_instances, create_network_performance,
                                 no_simplex, no_duplex):
    """Measure vswitch throughput with OVS-DPDK pinned to 1 core.

    Creates a FILE_SIZE GB file on the first instance via dd, then scp's it
    to the second instance twice, timing each transfer through the network
    namespace on the compute node, and logs the average MB/s rate.
    """
    LOG.tc_step("Add icmp and tcp rules")
    project_id = keystone_helper.get_projects(name='admin')[0]
    security_group = network_helper.get_security_groups(project=project_id)[0]
    network_helper.add_icmp_and_tcp_rules(security_group=security_group)

    LOG.tc_step("Get original vswitch_type and assigned_function properties")
    host_list = host_helper.get_hypervisors()

    LOG.tc_step("Sync instance with compute containing ns for ssh")
    host, ns = get_host_and_ns(netid=create_network_performance[0], host_list=host_list)
    assert ns is not None, "namespace not found on host list {}".format(host_list)
    # Both instances must sit on the host that owns the namespace so the
    # scp traffic goes through the vswitch under test.
    if host_list[1] != host:
        vm_helper.live_migrate_vm(vm_id=create_instances["vm_id_1"], destination_host=host)
        vm_helper.live_migrate_vm(vm_id=create_instances["vm_id_2"], destination_host=host)

    LOG.tc_step("Connect to compute node containing images")
    with host_helper.ssh_to_host(host) as node_ssh:
        LOG.tc_step("Create huge file on {}".format(create_instances["vm_id_1"]))
        # Drop any stale known_hosts entry for the VM ip first.
        ssh_cmd = ('ip netns exec {}'
                   ' ssh-keygen -R "{}"'
                   ''.format(ns, create_instances["vm_ip_1"]))
        node_ssh.send_sudo(cmd=ssh_cmd)
        node_ssh.expect()
        ssh_cmd = ('ip netns exec {} '
                   'ssh -o StrictHostKeyChecking=no '
                   '{}@{} "dd if=/dev/zero of=/tmp/test_file count={} bs=1G"'
                   ''.format(ns,
                             IMAGE_USER,
                             create_instances["vm_ip_1"],
                             FILE_SIZE))
        node_ssh.send_sudo(cmd=ssh_cmd)
        node_ssh.expect(['password:', 'Password:'], timeout=10, searchwindowsize=100)
        node_ssh.send(cmd=IMAGE_PASS)
        index = node_ssh.expect([r'{}\+0 records out'.format(FILE_SIZE)], timeout=180)
        # Fixed: the failure message previously claimed success.
        assert index == 0, "File was not created on {}".format(create_instances["vm_id_1"])

        LOG.tc_step("Copy created file from {} to {}".format(create_instances["vm_id_1"],
                                                             create_instances["vm_id_2"]))

        res = list()

        for i in range(2):
            LOG.tc_step("Start of iter {}".format(i))
            ssh_cmd = ('ip netns exec {}'
                       ' ssh-keygen -R "{}"'
                       ''.format(ns, create_instances["vm_ip_1"]))
            node_ssh.send_sudo(cmd=ssh_cmd)
            node_ssh.expect()
            ssh_cmd = ('ip netns exec {} '
                       'ssh -o StrictHostKeyChecking=no '
                       '{}@{} "ls -lrtsh /tmp/test_file;'
                       ' echo start=$(date +%s%N);'
                       ' time scp -vvv /tmp/test_file {}@{};'
                       ' echo end=$(date +%s%N)"'
                       ''.format(ns,
                                 IMAGE_USER,
                                 create_instances["vm_ip_1"],
                                 IMAGE_USER,
                                 create_instances["vm_ip_2"]))
            node_ssh.send_sudo(cmd=ssh_cmd)
            node_ssh.expect(['password:', 'Password:'], timeout=10, searchwindowsize=100)
            node_ssh.send(cmd=IMAGE_PASS)
            index = node_ssh.expect(timeout=120)
            # Fixed: failure message previously claimed success (and had a typo).
            assert index == 0, "File was not transferred"
            real_time = None
            for line in node_ssh.cmd_output.split("\n"):
                if "real" in line:
                    # Parses shell 'time' output; assumes single-digit
                    # minutes in token[1] and seconds in token[2] -- TODO
                    # confirm against the target shell's time format.
                    real_time = int(line.split()[1][:1]) * 60 + float(line.split()[2][:-1])
                    LOG.info("real time = {}".format(real_time))
            # Guard: previously a missing 'real' line caused a TypeError
            # on the division below instead of a clear failure.
            assert real_time is not None, "could not parse 'real' time from scp output"
            rate = FILE_SIZE * 1000 / real_time
            res.append(rate)

    final_res = sum(res) / len(res)
    LOG.info("Avg time is : {} MB/s".format(round(final_res, 3)))
|
@ -11,7 +11,7 @@ VALID_SCOPES = ['function', 'class', 'module', 'session']
|
|||||||
_RESOURCE_TYPES = ['vm', 'volume', 'volume_type', 'volume_qos',
|
_RESOURCE_TYPES = ['vm', 'volume', 'volume_type', 'volume_qos',
|
||||||
'flavor', 'image', 'server_group', 'router',
|
'flavor', 'image', 'server_group', 'router',
|
||||||
'subnet', 'floating_ip', 'heat_stack', 'port',
|
'subnet', 'floating_ip', 'heat_stack', 'port',
|
||||||
'trunk', 'network', 'security_group_rule'
|
'trunk', 'network', 'security_group_rule',
|
||||||
'security_group', 'network_qos', 'vol_snapshot', 'aggregate',
|
'security_group', 'network_qos', 'vol_snapshot', 'aggregate',
|
||||||
'port_pair', 'port_pair_group', 'flow_classifier',
|
'port_pair', 'port_pair_group', 'flow_classifier',
|
||||||
'port_chain', 'datanetwork', 'providernet']
|
'port_chain', 'datanetwork', 'providernet']
|
||||||
|
@ -72,6 +72,13 @@ def no_simplex():
|
|||||||
skip(SkipSysType.SIMPLEX_SYSTEM)
|
skip(SkipSysType.SIMPLEX_SYSTEM)
|
||||||
|
|
||||||
|
|
||||||
|
@fixture(scope='session')
def no_duplex():
    """Session-scoped fixture: skip the test on AIO-Duplex systems."""
    LOG.fixture_step("(Session) Skip if Duplex")
    if not system_helper.is_aio_duplex():
        return
    skip(SkipSysType.DUPLEX_SYSTEM)
|
||||||
|
|
||||||
|
|
||||||
@fixture(scope='session')
|
@fixture(scope='session')
|
||||||
def simplex_only():
|
def simplex_only():
|
||||||
LOG.fixture_step("(Session) Skip if not Simplex")
|
LOG.fixture_step("(Session) Skip if not Simplex")
|
||||||
|
@ -184,7 +184,7 @@ class TelnetClient(Telnet):
|
|||||||
if char.isalpha() or char in valid_chars:
|
if char.isalpha() or char in valid_chars:
|
||||||
code = chr(ord(char.upper()) - 64)
|
code = chr(ord(char.upper()) - 64)
|
||||||
else:
|
else:
|
||||||
raise NotImplemented("ctrl+{} is not supported".format(char))
|
raise NotImplementedError("ctrl+{} is not supported".format(char))
|
||||||
self.logger.info("Send: ctrl+{}".format(char))
|
self.logger.info("Send: ctrl+{}".format(char))
|
||||||
self.write(code.encode())
|
self.write(code.encode())
|
||||||
|
|
||||||
|
@ -164,7 +164,7 @@ def parse_test_steps(log_dir, failures_only=True):
|
|||||||
if "Setup started for:" in line:
|
if "Setup started for:" in line:
|
||||||
if failures_only:
|
if failures_only:
|
||||||
split_line = line.split('::test_', 1)
|
split_line = line.split('::test_', 1)
|
||||||
if len(split_line) is 2:
|
if len(split_line) == 2:
|
||||||
test_name = 'test_' + split_line[1].replace('\n',
|
test_name = 'test_' + split_line[1].replace('\n',
|
||||||
'')
|
'')
|
||||||
if test_name in failed_tests:
|
if test_name in failed_tests:
|
||||||
|
Loading…
Reference in New Issue
Block a user