Replace deprecated methods with their successors

Replace deprecated methods with their successors.
Drop stdout_len and stderr_len as unused.

Change-Id: I558e864240c9d8bed7e9acd4dd1406c33d066800
Closes-bug: #1604688
Author: Alexey Stepanov
Date: 2016-07-20 10:13:28 +03:00
Parent: 10f64b741d
Commit: fadd06d129
8 changed files with 97 additions and 113 deletions
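In short, the private helper _wait from devops.helpers.helpers is swapped for its public successor wait_pass, and call sites read the pre-built stdout_str key instead of joining the raw stdout list. A minimal sketch of both patterns (the readiness check and result dict below are placeholders, not code from this change):

# Sketch only: wait_pass is the devops helper adopted by this change;
# everything else here is a stand-in for the real test code.
from devops.helpers.helpers import wait_pass


def cluster_is_ready():
    # Placeholder readiness check; wait_pass retries it until it stops
    # raising (or until the timeout expires).
    return True


wait_pass(cluster_is_ready, timeout=120)   # was: _wait(cluster_is_ready, timeout=120)

result = {'stdout': ['ok\n'], 'stdout_str': 'ok'}  # shape produced by execute_on_remote()
out = result['stdout_str']                         # was: ''.join(result['stdout']).strip()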

View File

@@ -16,12 +16,12 @@ from __future__ import division
 import hashlib
 import json
-from time import sleep
 import os
 import re
+from time import sleep
 from devops.error import TimeoutError
-from devops.helpers.helpers import _wait
+from devops.helpers.helpers import wait_pass
 from devops.helpers.helpers import wait
 from netaddr import IPAddress
 from netaddr import IPNetwork
@@ -36,6 +36,7 @@ from six.moves.urllib.error import HTTPError
 from six.moves.urllib.error import URLError
 # pylint: enable=import-error
 # pylint: disable=redefined-builtin
+# noinspection PyUnresolvedReferences
 from six.moves import xrange
 # pylint: enable=redefined-builtin
 import yaml
@@ -69,7 +70,7 @@ def check_cinder_status(ip):
         ip=ip,
         cmd=cmd
     )
-    cinder_services = ''.join(result['stdout'])
+    cinder_services = result['stdout_str']
     logger.debug('>$ cinder service-list\n{}'.format(cinder_services))
     if result['exit_code'] == 0:
         return all(' up ' in x.split('enabled')[1]
@@ -289,12 +290,12 @@ def restore_check_sum(ip):
         ip=ip,
         cmd="if [ -e /etc/fuel/data ]; then echo Restored!!; fi"
     )
-    assert_true("Restored!!" in ''.join(res['stdout']).strip(),
+    assert_true("Restored!!" in res['stdout_str'],
                 'Test file /etc/fuel/data '
                 'was not restored!!! {0}'.format(res['stderr']))
     logger.info("Restore check md5sum")
     md5sum_backup = ssh_manager.execute(ip, "cat /etc/fuel/sum")
-    assert_true(''.join(md5sum_backup['stdout']).strip(),
+    assert_true(md5sum_backup['stdout_str'],
                 'Command cat /etc/fuel/sum '
                 'failed with {0}'.format(md5sum_backup['stderr']))
     md5sum_restore = ssh_manager.execute(
@@ -339,7 +340,7 @@ def check_mysql(ip, node_name):
     logger.info('MySQL daemon is started on {0}'.format(node_name))
     # TODO(astudenov): add timeout_msg
-    _wait(
+    wait_pass(
         lambda: assert_equal(
             ssh_manager.execute(
                 ip,
@@ -348,13 +349,13 @@ def check_mysql(ip, node_name):
             'MySQL resource is NOT running on {0}'.format(node_name)),
         timeout=120)
     try:
-        wait(lambda: ''.join(ssh_manager.execute(
-            ip, check_galera_cmd)['stdout']).rstrip() == 'Synced', timeout=600,
+        wait(lambda: ssh_manager.execute(
+            ip, check_galera_cmd)['stdout_str'] == 'Synced', timeout=600,
             timeout_msg='galera status != "Synced" on node {!r} with ip {}'
                         ''.format(node_name, ip))
     except TimeoutError:
-        logger.error('galera status is {0}'.format(''.join(ssh_manager.execute(
-            ip, check_galera_cmd)['stdout']).rstrip()))
+        logger.error('galera status is {0}'.format(ssh_manager.execute(
+            ip, check_galera_cmd)['stdout_str']))
         raise
@@ -704,21 +705,19 @@ def external_dns_check(ip):
     logger.debug("provided to test dns is {}".format(provided_dns))
     cluster_dns = []
     for dns in provided_dns:
-        ext_dns_ip = ''.join(
-            ssh_manager.execute(
-                ip=ip,
-                cmd="grep {0} /etc/resolv.dnsmasq.conf | "
-                    "awk {{'print $2'}}".format(dns)
-            )["stdout"]).rstrip()
+        ext_dns_ip = ssh_manager.execute(
+            ip=ip,
+            cmd="grep {0} /etc/resolv.dnsmasq.conf | "
+                "awk {{'print $2'}}".format(dns)
+        )["stdout_str"]
         cluster_dns.append(ext_dns_ip)
     logger.debug("external dns in conf is {}".format(cluster_dns))
     assert_equal(set(provided_dns), set(cluster_dns),
                  "/etc/resolv.dnsmasq.conf does not contain external dns ip")
-    command_hostname = ''.join(
-        ssh_manager.execute(ip,
-                            "host {0} | awk {{'print $5'}}"
-                            .format(PUBLIC_TEST_IP))
-        ["stdout"]).rstrip()
+    command_hostname = ssh_manager.execute(
+        ip,
+        "host {0} | awk {{'print $5'}}".format(PUBLIC_TEST_IP)
+    )["stdout_str"]
     hostname = 'google-public-dns-a.google.com.'
     assert_equal(command_hostname, hostname,
                  "Can't resolve hostname")
@@ -756,11 +755,10 @@ def external_ntp_check(ip, vrouter_vip):
     logger.debug("provided to test ntp is {}".format(provided_ntp))
     cluster_ntp = []
     for ntp in provided_ntp:
-        ext_ntp_ip = ''.join(
-            ssh_manager.execute(
-                ip=ip,
-                cmd="awk '/^server +{0}/{{print $2}}' "
-                    "/etc/ntp.conf".format(ntp))["stdout"]).rstrip()
+        ext_ntp_ip = ssh_manager.execute(
+            ip=ip,
+            cmd="awk '/^server +{0}/{{print $2}}' "
+                "/etc/ntp.conf".format(ntp))["stdout_str"]
         cluster_ntp.append(ext_ntp_ip)
     logger.debug("external ntp in conf is {}".format(cluster_ntp))
     assert_equal(set(provided_ntp), set(cluster_ntp),
@@ -778,9 +776,9 @@ def external_ntp_check(ip, vrouter_vip):
 def check_swift_ring(ip):
     for ring in ['object', 'account', 'container']:
-        res = ''.join(ssh_manager.execute(
+        res = ssh_manager.execute(
             ip, "swift-ring-builder /etc/swift/{0}.builder".format(
-                ring))['stdout'])
+                ring))['stdout_str']
         logger.debug("swift ring builder information is {0}".format(res))
         balance = re.search('(\d+.\d+) balance', res).group(1)
         assert_true(float(balance) < 10,
@@ -1116,8 +1114,8 @@ def check_hiera_hosts(nodes, cmd):
         result = ssh_manager.execute_on_remote(
             ip=node['ip'],
             cmd=cmd
-        )['stdout']
-        hosts = ''.join(result).strip().split(',')
+        )['stdout_str']
+        hosts = result.split(',')
         logger.debug("hosts on {0} are {1}".format(node['hostname'], hosts))
         if not hiera_hosts:

View File

@@ -204,9 +204,7 @@ class SSHManager(object):
         result = self.execute(ip=ip, port=port, cmd=cmd)
         result['stdout_str'] = ''.join(result['stdout']).strip()
-        result['stdout_len'] = len(result['stdout'])
         result['stderr_str'] = ''.join(result['stderr']).strip()
-        result['stderr_len'] = len(result['stderr'])
         details_log = (
             "Host: {host}\n"

View File

@@ -484,9 +484,7 @@ def run_on_remote_get_results(remote, cmd, clear=False, err_msg=None,
     result = remote.execute(cmd)
     result['stdout_str'] = ''.join(result['stdout']).strip()
-    result['stdout_len'] = len(result['stdout'])
     result['stderr_str'] = ''.join(result['stderr']).strip()
-    result['stderr_len'] = len(result['stderr'])
     details_log = (
         "Host: {host}\n"

View File

@@ -545,9 +545,9 @@ class EnvironmentModel(object):
         out = self.ssh_manager.execute(
             ip=self.ssh_manager.admin_ip,
             cmd=command
-        )['stdout']
-        assert_true(self.get_admin_node_ip() in "".join(out),
+        )['stdout_str']
+        assert_true(self.get_admin_node_ip() in out,
                     "dhcpcheck doesn't discover master ip")

     def bootstrap_image_check(self):
@@ -612,19 +612,17 @@ class EnvironmentModel(object):
         logger.info('Searching for updates..')
         update_command = 'yum clean expire-cache; yum update -y'
-        update_result = self.ssh_manager.execute(
+        update_result = self.ssh_manager.execute_on_remote(
             ip=self.ssh_manager.admin_ip,
-            cmd=update_command
+            cmd=update_command,
+            err_msg='Packages update failed, inspect logs for details'
         )
         logger.info('Result of "{1}" command on master node: '
                     '{0}'.format(update_result, update_command))
-        assert_equal(int(update_result['exit_code']), 0,
-                     'Packages update failed, '
-                     'inspect logs for details')
         # Check if any packets were updated and update was successful
-        yum_output = ''.join(update_result['stdout'])
+        yum_output = update_result['stdout_str']
         match_updated_count = re.search(r'Upgrade\s+(\d+)\s+Package',
                                         yum_output)
         # In case of package replacement, the new one is marked as
@@ -653,15 +651,13 @@ class EnvironmentModel(object):
         cmd = 'bootstrap_admin_node.sh;'
-        result = self.ssh_manager.execute(
+        result = self.ssh_manager.execute_on_remote(
             ip=self.ssh_manager.admin_ip,
-            cmd=cmd
+            cmd=cmd,
+            err_msg='bootstrap failed, inspect logs for details',
         )
         logger.info('Result of "{1}" command on master node: '
                     '{0}'.format(result, cmd))
-        assert_equal(int(result['exit_code']), 0,
-                     'bootstrap failed, '
-                     'inspect logs for details')
     # Modifies a resolv.conf on the Fuel master node and returns
     # its original content.
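Both call sites above fold the separate exit-code assertion into execute_on_remote, which fails with err_msg when the command returns a non-zero exit code. A rough sketch of that behaviour, not the real SSHManager implementation:

# Simplified stand-in; the real method also logs details and accepts more
# keyword arguments. 'execute' here is a placeholder callable returning the
# usual result dict.
def execute_on_remote(execute, cmd, err_msg=None):
    result = execute(cmd)
    if result['exit_code'] != 0:
        raise AssertionError(err_msg or 'Command failed: {0}'.format(cmd))
    return result


# Usage sketch with a fake transport standing in for SSH:
def fake_execute(cmd):
    return {'exit_code': 0, 'stdout': [], 'stdout_str': ''}


execute_on_remote(fake_execute, 'bootstrap_admin_node.sh;',
                  err_msg='bootstrap failed, inspect logs for details')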

View File

@@ -31,7 +31,7 @@ except ImportError:
     # pylint: disable=no-member
     DevopsObjNotFound = Node.DoesNotExist
     # pylint: enable=no-member
-from devops.helpers.helpers import _wait
+from devops.helpers.helpers import wait_pass
 from devops.helpers.helpers import wait
 import netaddr
 from proboscis.asserts import assert_equal
@@ -164,7 +164,7 @@ class FuelWebClient29(object):
                                 networks_count=2, timeout=300):
         logger.info('Assert cluster services are UP')
         # TODO(astudenov): add timeout_msg
-        _wait(
+        wait_pass(
             lambda: self.get_cluster_status(
                 os_conn,
                 smiles_count=smiles_count,
@@ -181,10 +181,10 @@ class FuelWebClient29(object):
                         .format(timeout))
            with QuietLogger(logging.ERROR):
                 # TODO(astudenov): add timeout_msg
-                _wait(lambda: self.run_ostf(cluster_id,
-                                            test_sets=['ha'],
-                                            should_fail=should_fail),
-                      interval=20, timeout=timeout)
+                wait_pass(lambda: self.run_ostf(cluster_id,
+                                                test_sets=['ha'],
+                                                should_fail=should_fail),
+                          interval=20, timeout=timeout)
             logger.info('OSTF HA tests passed successfully.')
         else:
             logger.debug('Cluster {0} is not in HA mode, OSTF HA tests '
@@ -199,10 +199,10 @@ class FuelWebClient29(object):
                         .format(timeout))
         with QuietLogger():
             # TODO(astudenov): add timeout_msg
-            _wait(lambda: self.run_ostf(cluster_id,
-                                        test_sets=['sanity'],
-                                        should_fail=should_fail),
-                  interval=10, timeout=timeout)
+            wait_pass(lambda: self.run_ostf(cluster_id,
+                                            test_sets=['sanity'],
+                                            should_fail=should_fail),
+                      interval=10, timeout=timeout)
         logger.info('OSTF Sanity checks passed successfully.')

     @logwrap

View File

@@ -12,7 +12,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
-from devops.helpers.helpers import _wait
+from devops.helpers.helpers import wait_pass
 from proboscis import test
 from proboscis.asserts import assert_equal
 from proboscis.asserts import assert_false
@@ -58,8 +58,8 @@ class TestsConfigDBAPI(TestBasic):
         install_configdb(master_node_ip=self.ssh_manager.admin_ip)
         logger.debug('Waiting for ConfigDB')
-        _wait(lambda: self.fuel_web.client.get_components(),
-              timeout=45)
+        wait_pass(lambda: self.fuel_web.client.get_components(),
+                  timeout=45)
         logger.debug('Get env and component data')
         components = self.fuel_web.client.get_components()

View File

@@ -13,7 +13,7 @@
 # under the License.
 import time
-from devops.helpers.helpers import _wait
+from devops.helpers.helpers import wait_pass
 from devops.helpers.helpers import wait
 from proboscis import asserts
 from proboscis import test
@@ -137,21 +137,20 @@ class CICMaintenanceMode(TestBasic):
                 [dregular_ctrl.name])
             # Wait until RabbitMQ cluster is UP
-            _wait(lambda:
+            wait_pass(lambda:
                   self.fuel_web.run_single_ostf_test(
                       cluster_id, test_sets=['ha'],
                       test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                           'RabbitMQ availability')),
                   timeout=1500)
             logger.info('RabbitMQ cluster is available')
-            # TODO(astudenov): add timeout_msg
-            _wait(lambda:
-                  self.fuel_web.run_single_ostf_test(
-                      cluster_id, test_sets=['sanity'],
-                      test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
-                          'Check that required services are running')),
-                  timeout=1500)
+            wait_pass(lambda:
+                      self.fuel_web.run_single_ostf_test(
+                          cluster_id, test_sets=['sanity'],
+                          test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
+                              'Check that required services are running')),
+                      timeout=1500)
             logger.info("Required services are running")
             # TODO(astudenov): add timeout_msg
@@ -250,21 +249,21 @@ class CICMaintenanceMode(TestBasic):
                 [dregular_ctrl.name])
             # Wait until RabbitMQ cluster is UP
-            _wait(lambda:
+            wait_pass(lambda:
                   self.fuel_web.run_single_ostf_test(
                       cluster_id, test_sets=['ha'],
                       test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                           'RabbitMQ availability')),
                   timeout=1500)
             logger.info('RabbitMQ cluster is available')
             # Wait until all Openstack services are UP
-            _wait(lambda:
+            wait_pass(lambda:
                   self.fuel_web.run_single_ostf_test(
                       cluster_id, test_sets=['sanity'],
                       test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                           'Check that required services are running')),
                   timeout=1500)
             logger.info("Required services are running")
             try:
@@ -423,21 +422,21 @@ class CICMaintenanceMode(TestBasic):
                 [dregular_ctrl.name])
             # Wait until RabbitMQ cluster is UP
-            _wait(lambda:
+            wait_pass(lambda:
                   self.fuel_web.run_single_ostf_test(
                       cluster_id, test_sets=['ha'],
                       test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                           'RabbitMQ availability')),
                   timeout=1500)
             logger.info('RabbitMQ cluster is available')
             # TODO(astudenov): add timeout_msg
-            _wait(lambda:
+            wait_pass(lambda:
                   self.fuel_web.run_single_ostf_test(
                       cluster_id, test_sets=['sanity'],
                       test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                           'Check that required services are running')),
                   timeout=1500)
             logger.info("Required services are running")
             try:

View File

@@ -16,7 +16,7 @@ import re
 import time
 from devops.error import TimeoutError
-from devops.helpers.helpers import _wait
+from devops.helpers.helpers import wait_pass
 from devops.helpers.helpers import tcp_ping
 from devops.helpers.helpers import wait
 from proboscis.asserts import assert_equal
@@ -518,7 +518,7 @@ class TestHaFailoverBase(TestBasic):
             remote.execute("iptables -D OUTPUT 1 -m owner --uid-owner heat -m"
                            " state --state NEW,ESTABLISHED,RELATED")
         # TODO(astudenov): add timeout_msg
-        _wait(lambda: assert_true(ocf_success in ''.join(
+        wait_pass(lambda: assert_true(ocf_success in ''.join(
             remote.execute(ocf_status)['stdout']).rstrip()), timeout=240)
         newpid = ''.join(remote.execute('pgrep {0}'
                          .format(heat_name))['stdout'])
@@ -1115,8 +1115,8 @@ class TestHaFailoverBase(TestBasic):
         @logwrap
         def _get_pcm_nodes(remote, pure=False):
             nodes = {}
-            pcs_status = remote.execute('pcs status nodes')['stdout']
-            pcm_nodes = yaml.load(''.join(pcs_status).strip())
+            pcs_status = remote.execute('pcs status nodes')['stdout_str']
+            pcm_nodes = yaml.load(pcs_status)
             for status in ('Online', 'Offline', 'Standby'):
                 list_nodes = (pcm_nodes['Pacemaker Nodes']
                               [status] or '').split()
@@ -1132,8 +1132,7 @@ class TestHaFailoverBase(TestBasic):
         for remote in ctrl_remotes:
             pcs_nodes = _get_pcm_nodes(remote)
             # TODO: FIXME: Rewrite using normal SSHManager and node name
-            node_name = ''.join(
-                remote.execute('hostname -f')['stdout']).strip()
+            node_name = remote.execute('hostname -f')['stdout_str']
             logger.debug(
                 "Status of pacemaker nodes on node {0}: {1}".
                 format(node_name, pcs_nodes))
@@ -1167,31 +1166,27 @@ class TestHaFailoverBase(TestBasic):
         for count in xrange(500):
             logger.debug('Checking splitbrain in the loop, '
                          'count number: {0}'.format(count))
-            # TODO(astudenov): add timeout_msg
-            _wait(
+            wait_pass(
                 lambda: assert_equal(
                     remote_controller.execute(
                         'killall -TERM corosync')['exit_code'], 0,
                     'Corosync was not killed on controller, '
                     'see debug log, count-{0}'.format(count)), timeout=20)
-            # TODO(astudenov): add timeout_msg
-            _wait(
+            wait_pass(
                 lambda: assert_true(
                     _check_all_pcs_nodes_status(
                         live_remotes, [controller_node['fqdn']],
                         'Offline'),
                     'Caught splitbrain, see debug log, '
                     'count-{0}'.format(count)), timeout=20)
-            # TODO(astudenov): add timeout_msg
-            _wait(
+            wait_pass(
                 lambda: assert_equal(
                     remote_controller.execute(
                         'service corosync start && service pacemaker '
                         'restart')['exit_code'], 0,
                     'Corosync was not started, see debug log,'
                     ' count-{0}'.format(count)), timeout=20)
-            # TODO(astudenov): add timeout_msg
-            _wait(
+            wait_pass(
                 lambda: assert_true(
                     _check_all_pcs_nodes_status(
                         ctrl_remotes, pcs_nodes_online, 'Online'),