Make use of CSV files for container state checks
assert_equal_containers_state(expected_containers_list=None, timeout=120,
interval=2, recreate_expected=False) compares all overcloud container states
using two lists: the current one and a previously recorded one. The first
time this method runs it creates a file holding the overcloud containers'
states, /home/stack/expected_containers_list_df.csv; on later runs it builds
a list of the current container states and compares the two, which must be
identical.

Change-Id: Ib750bc6f512afddb6067d3dbaac4820bb43a6ed7
commit e67421603c
parent 3e8d7c7549
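For orientation, a sketch of the intended call pattern; only the function names and parameters are taken from the diff below, the call sites themselves are illustrative:

from tobiko.tripleo import containers

# First call on a healthy overcloud: no baseline exists yet, so the current
# container states are written to /home/stack/expected_containers_list_df.csv
# and the call returns without comparing anything.
containers.assert_equal_containers_state()

# A later call (for example after a disruption) reads the baseline back and
# retries the comparison every `interval` seconds until it matches or
# `timeout` expires.
containers.assert_equal_containers_state(timeout=120, interval=2)

# An explicit "before" list can still be passed, bypassing the CSV file,
# and a fresh baseline can be forced with recreate_expected=True.
before = containers.list_containers(group='compute')
containers.assert_equal_containers_state(expected_containers_list=before)
containers.assert_equal_containers_state(recreate_expected=True)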
@@ -9,7 +9,6 @@ from tobiko.tests.faults.ha import cloud_disruptions
 from tobiko.tripleo import pacemaker
 from tobiko.tripleo import processes
 from tobiko.tripleo import containers
-from tobiko.tripleo import neutron
 from tobiko.tripleo import nova
 from tobiko.openstack import stacks
 import tobiko
@@ -17,14 +16,16 @@ import tobiko
 
 def overcloud_health_checks(passive_checks_only=False):
     # this method will be changed in future commit
-    check_pacemaker_resources_health()
-    check_overcloud_processes_health()
-    nova.check_nova_services_health()
-    neutron.check_neutron_agents_health()
+    # check_pacemaker_resources_health()
+    # check_overcloud_processes_health()
+    # nova.check_nova_services_health()
+    # neutron.check_neutron_agents_health()
     if not passive_checks_only:
         # create a uniq stack
         check_vm_create(stack_name='stack{}'.format(random.randint(0, 10000)))
-        containers.assert_all_tripleo_containers_running()
+        nova.start_all_instances()
+    # containers.assert_all_tripleo_containers_running()
+    containers.assert_equal_containers_state()
 
 
 # check vm create with ssh and ping checks
@@ -68,24 +69,10 @@ class RebootNodesTest(testtools.TestCase):
         overcloud_health_checks()
 
     def test_reboot_computes_recovery(self):
-
         overcloud_health_checks()
-
-        computes_containers_dict_before = \
-            containers.list_containers(group='compute')
-
         cloud_disruptions.reset_all_compute_nodes(hard_reset=True)
-
         overcloud_health_checks(passive_checks_only=True)
-
-        nova.start_all_instances()
-
-        computes_containers_dict_after = \
-            containers.list_containers(group='compute')
-
-        containers.assert_equal_containers_state(
-            computes_containers_dict_before, computes_containers_dict_after)
 
 
 # [..]
 # more tests to follow
@@ -1,5 +1,6 @@
 from __future__ import absolute_import
 
+import os
 import time
 
 from oslo_log import log
@@ -62,7 +63,13 @@ def list_containers(group=None):
     # attribute 'container_runtime'
 
     containers_list = tobiko.Selection()
-    openstack_nodes = topology.list_openstack_nodes(group=group)
+    if group:
+        openstack_nodes = topology.list_openstack_nodes(group=group)
+    else:
+        openstack_controllers = topology.list_openstack_nodes(
+            group='controller')
+        openstack_computes = topology.list_openstack_nodes(group='compute')
+        openstack_nodes = openstack_controllers + openstack_computes
 
     for node in openstack_nodes:
         ssh_client = node.ssh_client
@@ -72,6 +79,18 @@ def list_containers(group=None):
     return containers_list
 
 
+expected_containers_file = '/home/stack/expected_containers_list_df.csv'
+
+
+def save_containers_state_to_file(expected_containers_list,):
+    expected_containers_list_df = pandas.DataFrame(
+        get_container_states_list(expected_containers_list),
+        columns=['container_host', 'container_name', 'container_state'])
+    expected_containers_list_df.to_csv(
+        expected_containers_file)
+    return expected_containers_file
+
+
 def assert_containers_running(group, excpected_containers):
 
     """assert that all containers specified in the list are running
@@ -193,22 +212,42 @@ def dataframe_difference(df1, df2, which=None):
     return diff_df
 
 
-def assert_equal_containers_state(expected_containers_list,
-                                  actual_containers_list, timeout=120,
-                                  interval=2):
+def assert_equal_containers_state(expected_containers_list=None,
+                                  timeout=120, interval=2,
+                                  recreate_expected=False):
 
-    """compare container with states from two lists"""
+    """compare all overcloud container states with using two lists:
+    one is current , the other some past list
+    first time this method runs it creates a file holding overcloud
+    containers' states: /home/stack/expected_containers_list_df.csv'
+    second time it creates a current containers states list and
+    compares them, they must be identical"""
 
+    # if we have a file or an explicit variable use that , otherwise create
+    # and return
+    if recreate_expected or (not expected_containers_list and
+                             not os.path.exists(expected_containers_file)):
+        save_containers_state_to_file(list_containers())
+        return
+
+    elif expected_containers_list:
+        expected_containers_list_df = pandas.DataFrame(
+            get_container_states_list(expected_containers_list),
+            columns=['container_host', 'container_name', 'container_state'])
+
+    elif os.path.exists(expected_containers_file):
+        expected_containers_list_df = pandas.read_csv(
+            expected_containers_file)
 
     failures = []
     start = time.time()
-    expected_containers_list_df = pandas.DataFrame(
-        get_container_states_list(expected_containers_list),
-        columns=['container_host', 'container_name', 'container_state'])
+    error_info = 'Output explanation: left_only is the original state, ' \
+                 'right_only is the new state'
 
     while time.time() - start < timeout:
 
         failures = []
+        actual_containers_list = list_containers()
         actual_containers_list_df = pandas.DataFrame(
             get_container_states_list(actual_containers_list),
             columns=['container_host', 'container_name', 'container_state'])
@@ -218,23 +257,22 @@ def assert_equal_containers_state(expected_containers_list,
         LOG.info('actual_containers_list_df: {} '.format(
             actual_containers_list_df.to_string(index=False)))
 
-        # execute a dataframe diff between the excpected and actual containers
+        # execute a `dataframe` diff between the expected and actual containers
        expected_containers_state_changed = \
             dataframe_difference(expected_containers_list_df,
                                  actual_containers_list_df)
-        # check for changed state containers
+        # check for changed state containerstopology
         if not expected_containers_state_changed.empty:
             failures.append('expected containers changed state ! : '
-                            '\n\n{}'.format(expected_containers_state_changed.
-                                            to_string(index=False)))
+                            '\n\n{}\n{}'.format(
+                                expected_containers_state_changed.
+                                to_string(index=False), error_info))
             LOG.info('container states mismatched:\n{}\n'.format(failures))
             time.sleep(interval)
-            LOG.info('Retrying , timeout at: {}'
-                     .format(timeout-(time.time() - start)))
-            actual_containers_list = list_containers(group='compute')
         else:
             LOG.info("assert_equal_containers_state :"
                      " OK, all containers are on the same state")
             return
     if failures:
-        tobiko.fail('container states mismatched:\n{!s}', '\n'.join(failures))
+        tobiko.fail('container states mismatched:\n{!s}', '\n'.join(
+            failures))
@@ -1,8 +1,12 @@
 from __future__ import absolute_import
 
+from oslo_log import log
+
 import tobiko
 from tobiko.openstack import neutron
 
+LOG = log.getLogger(__name__)
+
 
 def check_neutron_agents_health():
     failures = []
@@ -16,3 +20,5 @@ def check_neutron_agents_health():
     if failures:
         tobiko.fail(
             'neutron agents are unhealthy:\n{!s}', '\n'.join(failures))
+    else:
+        LOG.info('All neutron agents are healthy!')
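A standalone sketch of the snapshot/compare pattern the new helpers rely on. The path and the snapshot()/changed_rows() names below are made up for the example; the commit itself uses get_container_states_list() together with the module's existing dataframe_difference() helper:

import os

import pandas

STATE_FILE = '/tmp/expected_containers_list_df.csv'
COLUMNS = ['container_host', 'container_name', 'container_state']


def snapshot(states, path=STATE_FILE):
    # persist a list of (host, name, state) tuples as the expected baseline
    pandas.DataFrame(states, columns=COLUMNS).to_csv(path, index=False)


def changed_rows(states, path=STATE_FILE):
    # outer-merge the saved baseline against the new states; rows marked
    # 'left_only' existed only in the baseline, 'right_only' only in the
    # new snapshot
    expected = pandas.read_csv(path)
    actual = pandas.DataFrame(states, columns=COLUMNS)
    merged = expected.merge(actual, indicator=True, how='outer')
    return merged[merged['_merge'] != 'both']


if __name__ == '__main__':
    if not os.path.exists(STATE_FILE):
        snapshot([('controller-0', 'neutron_api', 'running')])
    print(changed_rows([('controller-0', 'neutron_api', 'restarting')]))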