add compute reboot fault tests with added boot vm checks
Related bz: https://bugzilla.redhat.com/show_bug.cgi?id=1797892

Change-Id: Id60ea234827d9568e64ae52313df632a0a42aea4
parent 6bebf49459
commit 55c42252a2
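In short: the controller reset helper is renamed and gains an optional hard reset, a matching compute reset helper is added, and both reboot tests now finish by booting a throwaway Cirros VM and checking SSH and ICMP against its floating IP. Condensed from the diff below, the new compute test runs:

    def test_reboot_computes_recovery(self):
        nodes_health_check()                                         # pacemaker + overcloud process checks
        cloud_disruptions.reset_all_compute_nodes(hard_reset=True)   # sysrq hard reset of every compute
        nodes_health_check()
        check_vm_create(stack_name=self.id())                        # boot a fresh VM, verify SSH + ping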
@@ -10,14 +10,21 @@ from oslo_log import log
 LOG = log.getLogger(__name__)
 
 
-def reset_all_controller_nodes_sequentially():
+def reset_all_controller_nodes(hard_reset=False):
 
     # reboot all controllers and wait for ssh Up on them
+    # hard reset is simultaneous while soft is sequential
+    if hard_reset:
+        reset_method = 'sudo chmod o+w /proc/sysrq-trigger;' \
+                       'sudo echo b > /proc/sysrq-trigger'
+    else:
+        reset_method = 'sudo reboot'
     nodes = topology.list_openstack_nodes(group='controller')
     for controller in nodes:
-        sh.execute("sudo reboot", ssh_client=controller.ssh_client,
-                   expect_exit_status=None)
-        LOG.info('rebooted {}'.format(controller.name))
+        # using ssh_client.connect we use a fire and forget reboot method
+        controller.ssh_client.connect().exec_command(reset_method)
+        LOG.info('reboot exec: {} on server: {}'.format(reset_method,
+                                                        controller.name))
+        tobiko.cleanup_fixture(controller.ssh_client)
 
     for controller in topology.list_openstack_nodes(group='controller'):
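Two details of the new reset path deserve a note. The hard reset writes 'b' to /proc/sysrq-trigger, which makes the kernel reboot immediately without syncing or shutting services down cleanly; the preceding chmod o+w is needed because the shell redirection into /proc/sysrq-trigger is not itself performed under sudo. Since the node drops off the network mid-command, the reboot goes through ssh_client.connect().exec_command() as a fire-and-forget call (per the comment above), and tobiko.cleanup_fixture() then discards the stale SSH connection so later checks open a fresh one. The single "hostname" probe that follows does not retry; a minimal sketch of how a caller could poll until a rebooted node answers again, reusing only the sh.execute call shape from this diff (the helper name, timeout and interval are illustrative, not part of the change):

import time

from tobiko.shell import sh


def wait_for_node_ssh(node, timeout=600, interval=10):
    # illustrative helper (not in this commit): poll until the rebooted
    # node answers 'hostname' over SSH again, or give up after `timeout`
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            return sh.execute("hostname", ssh_client=node.ssh_client,
                              expect_exit_status=None).stdout
        except Exception:
            # SSH is still down while the node reboots; wait and retry
            time.sleep(interval)
    raise RuntimeError('node did not come back within {}s'.format(timeout))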
@@ -25,3 +32,25 @@ def reset_all_controller_nodes_sequentially():
                                         ssh_client=controller.ssh_client,
                                         expect_exit_status=None).stdout
     LOG.info('{} is up '.format(controller_checked))
+
+
+def reset_all_compute_nodes(hard_reset=False):
+
+    # reboot all computes and wait for ssh Up on them
+    # hard reset is simultaneous while soft is sequential
+    if hard_reset:
+        reset_method = 'sudo chmod o+w /proc/sysrq-trigger;' \
+                       'sudo echo b > /proc/sysrq-trigger'
+    else:
+        reset_method = 'sudo reboot'
+    for compute in topology.list_openstack_nodes(group='compute'):
+        # using ssh_client.connect we use a fire and forget reboot method
+        compute.ssh_client.connect().exec_command(reset_method)
+        LOG.info('reboot exec: {} on server: {}'.format(reset_method,
+                                                        compute.name))
+        tobiko.cleanup_fixture(compute.ssh_client)
+
+    for compute in topology.list_openstack_nodes(group='compute'):
+        compute_checked = sh.execute("hostname", ssh_client=compute.ssh_client,
+                                     expect_exit_status=None).stdout
+        LOG.info('{} is up '.format(compute_checked))
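reset_all_compute_nodes is a near-verbatim copy of reset_all_controller_nodes; only the node group and variable names differ. A possible follow-up consolidation, sketched with a hypothetical reset_all_nodes name and using only the topology, sh, LOG and tobiko calls already present in this module:

def reset_all_nodes(group, hard_reset=False):
    # hypothetical shared helper, not part of this change: same hard/soft
    # reset choice as above, applied to whichever node group is named
    if hard_reset:
        reset_method = 'sudo chmod o+w /proc/sysrq-trigger;' \
                       'sudo echo b > /proc/sysrq-trigger'
    else:
        reset_method = 'sudo reboot'
    for node in topology.list_openstack_nodes(group=group):
        # fire-and-forget reboot, then drop the stale SSH connection
        node.ssh_client.connect().exec_command(reset_method)
        LOG.info('reboot exec: {} on server: {}'.format(reset_method,
                                                        node.name))
        tobiko.cleanup_fixture(node.ssh_client)
    for node in topology.list_openstack_nodes(group=group):
        checked = sh.execute("hostname", ssh_client=node.ssh_client,
                             expect_exit_status=None).stdout
        LOG.info('{} is up '.format(checked))

reset_all_controller_nodes() and reset_all_compute_nodes() would then reduce to one-line wrappers, e.g. reset_all_nodes(group='controller', hard_reset=hard_reset).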
@@ -1,21 +1,42 @@
 from __future__ import absolute_import
 
 import testtools
 
+from tobiko.shell import ping
+from tobiko.shell import sh
 from tobiko.tests.faults.ha import cloud_disruptions
 from tobiko.tripleo import pacemaker
 from tobiko.tripleo import processes
+from tobiko.openstack import stacks
+import tobiko
 
 
 def nodes_health_check():
     # this method will be changed in future commit
     check_pacemaker_resources_health()
     check_overcloud_processes_health()
 
-    # TODO:
-    # Test existing created servers
-    # ServerStackResourcesTest().test_server_create()
+
+
+# check vm create with ssh and ping checks
+def check_vm_create(stack_name):
+    '''stack_name: unique stack name ,
+    so that each time a new vm is created'''
+    # create a vm
+    stack = stacks.CirrosServerStackFixture(
+        stack_name=stack_name)
+    tobiko.reset_fixture(stack)
+    stack.wait_for_create_complete()
+    # Test SSH connectivity to floating IP address
+    sh.get_hostname(ssh_client=stack.ssh_client)
+
+    # Test ICMP connectivity to floating IP address
+    ping.ping_until_received(
+        stack.floating_ip_address).assert_replied()
 
 
 # check cluster failed statuses
 def check_pacemaker_resources_health():
     return pacemaker.PacemakerResourcesStatus().all_healthy
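check_vm_create deliberately takes the stack name from the caller: as its docstring says, a unique name per invocation means a new stack, and therefore a new VM, is created and verified each time rather than reusing an earlier one. The reboot tests below pass the testtools test id for this purpose:

    # inside a testtools.TestCase method; self.id() is unique per test,
    # so every test boots and checks its own fresh Cirros server
    check_vm_create(stack_name=self.id())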
@@ -25,28 +46,6 @@ def check_overcloud_processes_health():
     return processes.OvercloudProcessesStatus(
     ).basic_overcloud_processes_running
-
-# TODO:
-# class ServerStackResourcesTest(testtools.TestCase):
-#
-# """Tests connectivity via floating IPs"""
-#
-# #: Resources stack with floating IP and Nova server
-# # TODO move down :
-# # stack = tobiko.required_setup_fixture(stacks.CirrosServerStackFixture)
-# # stack = tobiko.setup(my_instace) #tobiko.setup(my_instace)
-#
-# # TODO new instances of the class , give a uniq stack name
-# # TODO : create a new CirrosServerStackFixture ?
-# # CirrosServerStackNameFixture(stack_name='my-unique-id')
-# # tobiko.setup(my_instace) -> tobiko.cleanup(my_instance)
-# def test_create_vm(self):
-# """Test SSH connectivity to floating IP address"""
-# stack = tobiko.setup(my_instace) # tobiko.setup(my_instace)
-# tobiko.cleanup(my_instance)
-# # TODO : add check if old vm is there
-# hostname = sh.get_hostname(ssh_client=self.stack.ssh_client)
-# self.assertEqual(self.stack.server_name.lower(), hostname)
 
 
 class RebootNodesTest(testtools.TestCase):
 
@@ -56,9 +55,15 @@ class RebootNodesTest(testtools.TestCase):
 
     def test_reboot_controllers_recovery(self):
         nodes_health_check()
-        cloud_disruptions.reset_all_controller_nodes_sequentially()
+        cloud_disruptions.reset_all_controller_nodes()
         nodes_health_check()
+        check_vm_create(stack_name=self.id())
+
+    def test_reboot_computes_recovery(self):
+        nodes_health_check()
+        cloud_disruptions.reset_all_compute_nodes(hard_reset=True)
+        nodes_health_check()
+        check_vm_create(stack_name=self.id())
 
 # [..]
 # more tests to follow