Replace deprecated methods with their successors

Replace deprecated methods with their successors
Drop stdout_len and stderr_len as unused

Change-Id: I558e864240c9d8bed7e9acd4dd1406c33d066800
Closes-bug: #1604688
Alexey Stepanov 2016-07-20 10:13:28 +03:00
parent 10f64b741d
commit fadd06d129
8 changed files with 97 additions and 113 deletions
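
Both replacements follow the same pattern throughout the hunks below: the private devops helper _wait is swapped for its public successor wait_pass (the call sites keep their arguments, only the name changes), and manual ''.join(result['stdout']).strip() flattening is swapped for the pre-built 'stdout_str' field, which makes the stdout_len/stderr_len counters redundant. A minimal sketch of the access pattern, using a hand-written stand-in dict rather than an actual ssh_manager.execute() result:

# Stand-in for the dict returned by ssh_manager.execute() after this change:
# 'stdout'/'stderr' remain lists of lines; 'stdout_str'/'stderr_str' are pre-joined and stripped.
result = {
    'exit_code': 0,
    'stdout': ['Synced\n'],
    'stdout_str': 'Synced',
    'stderr': [],
    'stderr_str': '',
}

# Old pattern, removed throughout this commit:
galera_status = ''.join(result['stdout']).rstrip()

# New pattern:
galera_status = result['stdout_str']
assert galera_status == 'Synced'

# The dropped stdout_len field is trivial to recover where a caller still needs it:
stdout_len = len(result['stdout'])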


@@ -16,12 +16,12 @@ from __future__ import division
import hashlib
import json
from time import sleep
import os
import re
from time import sleep
from devops.error import TimeoutError
from devops.helpers.helpers import _wait
from devops.helpers.helpers import wait_pass
from devops.helpers.helpers import wait
from netaddr import IPAddress
from netaddr import IPNetwork
@@ -36,6 +36,7 @@ from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
# pylint: enable=import-error
# pylint: disable=redefined-builtin
# noinspection PyUnresolvedReferences
from six.moves import xrange
# pylint: enable=redefined-builtin
import yaml
@@ -69,7 +70,7 @@ def check_cinder_status(ip):
ip=ip,
cmd=cmd
)
cinder_services = ''.join(result['stdout'])
cinder_services = result['stdout_str']
logger.debug('>$ cinder service-list\n{}'.format(cinder_services))
if result['exit_code'] == 0:
return all(' up ' in x.split('enabled')[1]
@@ -289,12 +290,12 @@ def restore_check_sum(ip):
ip=ip,
cmd="if [ -e /etc/fuel/data ]; then echo Restored!!; fi"
)
assert_true("Restored!!" in ''.join(res['stdout']).strip(),
assert_true("Restored!!" in res['stdout_str'],
'Test file /etc/fuel/data '
'was not restored!!! {0}'.format(res['stderr']))
logger.info("Restore check md5sum")
md5sum_backup = ssh_manager.execute(ip, "cat /etc/fuel/sum")
assert_true(''.join(md5sum_backup['stdout']).strip(),
assert_true(md5sum_backup['stdout_str'],
'Command cat /etc/fuel/sum '
'failed with {0}'.format(md5sum_backup['stderr']))
md5sum_restore = ssh_manager.execute(
@@ -339,7 +340,7 @@ def check_mysql(ip, node_name):
logger.info('MySQL daemon is started on {0}'.format(node_name))
# TODO(astudenov): add timeout_msg
_wait(
wait_pass(
lambda: assert_equal(
ssh_manager.execute(
ip,
@@ -348,13 +349,13 @@ def check_mysql(ip, node_name):
'MySQL resource is NOT running on {0}'.format(node_name)),
timeout=120)
try:
wait(lambda: ''.join(ssh_manager.execute(
ip, check_galera_cmd)['stdout']).rstrip() == 'Synced', timeout=600,
wait(lambda: ssh_manager.execute(
ip, check_galera_cmd)['stdout_str'] == 'Synced', timeout=600,
timeout_msg='galera status != "Synced" on node {!r} with ip {}'
''.format(node_name, ip))
except TimeoutError:
logger.error('galera status is {0}'.format(''.join(ssh_manager.execute(
ip, check_galera_cmd)['stdout']).rstrip()))
logger.error('galera status is {0}'.format(ssh_manager.execute(
ip, check_galera_cmd)['stdout_str']))
raise
@@ -704,21 +705,19 @@ def external_dns_check(ip):
logger.debug("provided to test dns is {}".format(provided_dns))
cluster_dns = []
for dns in provided_dns:
ext_dns_ip = ''.join(
ssh_manager.execute(
ip=ip,
cmd="grep {0} /etc/resolv.dnsmasq.conf | "
"awk {{'print $2'}}".format(dns)
)["stdout"]).rstrip()
ext_dns_ip = ssh_manager.execute(
ip=ip,
cmd="grep {0} /etc/resolv.dnsmasq.conf | "
"awk {{'print $2'}}".format(dns)
)["stdout_str"]
cluster_dns.append(ext_dns_ip)
logger.debug("external dns in conf is {}".format(cluster_dns))
assert_equal(set(provided_dns), set(cluster_dns),
"/etc/resolv.dnsmasq.conf does not contain external dns ip")
command_hostname = ''.join(
ssh_manager.execute(ip,
"host {0} | awk {{'print $5'}}"
.format(PUBLIC_TEST_IP))
["stdout"]).rstrip()
command_hostname = ssh_manager.execute(
ip,
"host {0} | awk {{'print $5'}}".format(PUBLIC_TEST_IP)
)["stdout_str"]
hostname = 'google-public-dns-a.google.com.'
assert_equal(command_hostname, hostname,
"Can't resolve hostname")
@@ -756,11 +755,10 @@ def external_ntp_check(ip, vrouter_vip):
logger.debug("provided to test ntp is {}".format(provided_ntp))
cluster_ntp = []
for ntp in provided_ntp:
ext_ntp_ip = ''.join(
ssh_manager.execute(
ip=ip,
cmd="awk '/^server +{0}/{{print $2}}' "
"/etc/ntp.conf".format(ntp))["stdout"]).rstrip()
ext_ntp_ip = ssh_manager.execute(
ip=ip,
cmd="awk '/^server +{0}/{{print $2}}' "
"/etc/ntp.conf".format(ntp))["stdout_str"]
cluster_ntp.append(ext_ntp_ip)
logger.debug("external ntp in conf is {}".format(cluster_ntp))
assert_equal(set(provided_ntp), set(cluster_ntp),
@@ -778,9 +776,9 @@ def external_ntp_check(ip, vrouter_vip):
def check_swift_ring(ip):
for ring in ['object', 'account', 'container']:
res = ''.join(ssh_manager.execute(
res = ssh_manager.execute(
ip, "swift-ring-builder /etc/swift/{0}.builder".format(
ring))['stdout'])
ring))['stdout_str']
logger.debug("swift ring builder information is {0}".format(res))
balance = re.search('(\d+.\d+) balance', res).group(1)
assert_true(float(balance) < 10,
@@ -1116,8 +1114,8 @@ def check_hiera_hosts(nodes, cmd):
result = ssh_manager.execute_on_remote(
ip=node['ip'],
cmd=cmd
)['stdout']
hosts = ''.join(result).strip().split(',')
)['stdout_str']
hosts = result.split(',')
logger.debug("hosts on {0} are {1}".format(node['hostname'], hosts))
if not hiera_hosts:


@@ -204,9 +204,7 @@ class SSHManager(object):
result = self.execute(ip=ip, port=port, cmd=cmd)
result['stdout_str'] = ''.join(result['stdout']).strip()
result['stdout_len'] = len(result['stdout'])
result['stderr_str'] = ''.join(result['stderr']).strip()
result['stderr_len'] = len(result['stderr'])
details_log = (
"Host: {host}\n"


@@ -484,9 +484,7 @@ def run_on_remote_get_results(remote, cmd, clear=False, err_msg=None,
result = remote.execute(cmd)
result['stdout_str'] = ''.join(result['stdout']).strip()
result['stdout_len'] = len(result['stdout'])
result['stderr_str'] = ''.join(result['stderr']).strip()
result['stderr_len'] = len(result['stderr'])
details_log = (
"Host: {host}\n"


@@ -545,9 +545,9 @@ class EnvironmentModel(object):
out = self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd=command
)['stdout']
)['stdout_str']
assert_true(self.get_admin_node_ip() in "".join(out),
assert_true(self.get_admin_node_ip() in out,
"dhcpcheck doesn't discover master ip")
def bootstrap_image_check(self):
@@ -612,19 +612,17 @@ class EnvironmentModel(object):
logger.info('Searching for updates..')
update_command = 'yum clean expire-cache; yum update -y'
update_result = self.ssh_manager.execute(
update_result = self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd=update_command
cmd=update_command,
err_msg='Packages update failed, inspect logs for details'
)
logger.info('Result of "{1}" command on master node: '
'{0}'.format(update_result, update_command))
assert_equal(int(update_result['exit_code']), 0,
'Packages update failed, '
'inspect logs for details')
# Check if any packets were updated and update was successful
yum_output = ''.join(update_result['stdout'])
yum_output = update_result['stdout_str']
match_updated_count = re.search(r'Upgrade\s+(\d+)\s+Package',
yum_output)
# In case of package replacement, the new one is marked as
@@ -653,15 +651,13 @@ class EnvironmentModel(object):
cmd = 'bootstrap_admin_node.sh;'
result = self.ssh_manager.execute(
result = self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd=cmd
cmd=cmd,
err_msg='bootstrap failed, inspect logs for details',
)
logger.info('Result of "{1}" command on master node: '
'{0}'.format(result, cmd))
assert_equal(int(result['exit_code']), 0,
'bootstrap failed, '
'inspect logs for details')
# Modifies a resolv.conf on the Fuel master node and returns
# its original content.


@@ -31,7 +31,7 @@ except ImportError:
# pylint: disable=no-member
DevopsObjNotFound = Node.DoesNotExist
# pylint: enable=no-member
from devops.helpers.helpers import _wait
from devops.helpers.helpers import wait_pass
from devops.helpers.helpers import wait
import netaddr
from proboscis.asserts import assert_equal
@@ -164,7 +164,7 @@ class FuelWebClient29(object):
networks_count=2, timeout=300):
logger.info('Assert cluster services are UP')
# TODO(astudenov): add timeout_msg
_wait(
wait_pass(
lambda: self.get_cluster_status(
os_conn,
smiles_count=smiles_count,
@@ -181,10 +181,10 @@ class FuelWebClient29(object):
.format(timeout))
with QuietLogger(logging.ERROR):
# TODO(astudenov): add timeout_msg
_wait(lambda: self.run_ostf(cluster_id,
test_sets=['ha'],
should_fail=should_fail),
interval=20, timeout=timeout)
wait_pass(lambda: self.run_ostf(cluster_id,
test_sets=['ha'],
should_fail=should_fail),
interval=20, timeout=timeout)
logger.info('OSTF HA tests passed successfully.')
else:
logger.debug('Cluster {0} is not in HA mode, OSTF HA tests '
@@ -199,10 +199,10 @@ class FuelWebClient29(object):
.format(timeout))
with QuietLogger():
# TODO(astudenov): add timeout_msg
_wait(lambda: self.run_ostf(cluster_id,
test_sets=['sanity'],
should_fail=should_fail),
interval=10, timeout=timeout)
wait_pass(lambda: self.run_ostf(cluster_id,
test_sets=['sanity'],
should_fail=should_fail),
interval=10, timeout=timeout)
logger.info('OSTF Sanity checks passed successfully.')
@logwrap


@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from devops.helpers.helpers import _wait
from devops.helpers.helpers import wait_pass
from proboscis import test
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
@@ -58,8 +58,8 @@ class TestsConfigDBAPI(TestBasic):
install_configdb(master_node_ip=self.ssh_manager.admin_ip)
logger.debug('Waiting for ConfigDB')
_wait(lambda: self.fuel_web.client.get_components(),
timeout=45)
wait_pass(lambda: self.fuel_web.client.get_components(),
timeout=45)
logger.debug('Get env and component data')
components = self.fuel_web.client.get_components()


@@ -13,7 +13,7 @@
# under the License.
import time
from devops.helpers.helpers import _wait
from devops.helpers.helpers import wait_pass
from devops.helpers.helpers import wait
from proboscis import asserts
from proboscis import test
@@ -137,21 +137,20 @@ class CICMaintenanceMode(TestBasic):
[dregular_ctrl.name])
# Wait until RabbitMQ cluster is UP
_wait(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['ha'],
test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
'RabbitMQ availability')),
timeout=1500)
wait_pass(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['ha'],
test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
'RabbitMQ availability')),
timeout=1500)
logger.info('RabbitMQ cluster is available')
# TODO(astudenov): add timeout_msg
_wait(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],
test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
'Check that required services are running')),
timeout=1500)
wait_pass(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],
test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
'Check that required services are running')),
timeout=1500)
logger.info("Required services are running")
# TODO(astudenov): add timeout_msg
@@ -250,21 +249,21 @@ class CICMaintenanceMode(TestBasic):
[dregular_ctrl.name])
# Wait until RabbitMQ cluster is UP
_wait(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['ha'],
test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
'RabbitMQ availability')),
timeout=1500)
wait_pass(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['ha'],
test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
'RabbitMQ availability')),
timeout=1500)
logger.info('RabbitMQ cluster is available')
# Wait until all Openstack services are UP
_wait(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],
test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
'Check that required services are running')),
timeout=1500)
wait_pass(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],
test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
'Check that required services are running')),
timeout=1500)
logger.info("Required services are running")
try:
@@ -423,21 +422,21 @@ class CICMaintenanceMode(TestBasic):
[dregular_ctrl.name])
# Wait until RabbitMQ cluster is UP
_wait(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['ha'],
test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
'RabbitMQ availability')),
timeout=1500)
wait_pass(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['ha'],
test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
'RabbitMQ availability')),
timeout=1500)
logger.info('RabbitMQ cluster is available')
# TODO(astudenov): add timeout_msg
_wait(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],
test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
'Check that required services are running')),
timeout=1500)
wait_pass(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],
test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
'Check that required services are running')),
timeout=1500)
logger.info("Required services are running")
try:


@@ -16,7 +16,7 @@ import re
import time
from devops.error import TimeoutError
from devops.helpers.helpers import _wait
from devops.helpers.helpers import wait_pass
from devops.helpers.helpers import tcp_ping
from devops.helpers.helpers import wait
from proboscis.asserts import assert_equal
@@ -518,7 +518,7 @@ class TestHaFailoverBase(TestBasic):
remote.execute("iptables -D OUTPUT 1 -m owner --uid-owner heat -m"
" state --state NEW,ESTABLISHED,RELATED")
# TODO(astudenov): add timeout_msg
_wait(lambda: assert_true(ocf_success in ''.join(
wait_pass(lambda: assert_true(ocf_success in ''.join(
remote.execute(ocf_status)['stdout']).rstrip()), timeout=240)
newpid = ''.join(remote.execute('pgrep {0}'
.format(heat_name))['stdout'])
@@ -1115,8 +1115,8 @@
@logwrap
def _get_pcm_nodes(remote, pure=False):
nodes = {}
pcs_status = remote.execute('pcs status nodes')['stdout']
pcm_nodes = yaml.load(''.join(pcs_status).strip())
pcs_status = remote.execute('pcs status nodes')['stdout_str']
pcm_nodes = yaml.load(pcs_status)
for status in ('Online', 'Offline', 'Standby'):
list_nodes = (pcm_nodes['Pacemaker Nodes']
[status] or '').split()
@@ -1132,8 +1132,7 @@
for remote in ctrl_remotes:
pcs_nodes = _get_pcm_nodes(remote)
# TODO: FIXME: Rewrite using normal SSHManager and node name
node_name = ''.join(
remote.execute('hostname -f')['stdout']).strip()
node_name = remote.execute('hostname -f')['stdout_str']
logger.debug(
"Status of pacemaker nodes on node {0}: {1}".
format(node_name, pcs_nodes))
@@ -1167,31 +1166,27 @@
for count in xrange(500):
logger.debug('Checking splitbrain in the loop, '
'count number: {0}'.format(count))
# TODO(astudenov): add timeout_msg
_wait(
wait_pass(
lambda: assert_equal(
remote_controller.execute(
'killall -TERM corosync')['exit_code'], 0,
'Corosync was not killed on controller, '
'see debug log, count-{0}'.format(count)), timeout=20)
# TODO(astudenov): add timeout_msg
_wait(
wait_pass(
lambda: assert_true(
_check_all_pcs_nodes_status(
live_remotes, [controller_node['fqdn']],
'Offline'),
'Caught splitbrain, see debug log, '
'count-{0}'.format(count)), timeout=20)
# TODO(astudenov): add timeout_msg
_wait(
wait_pass(
lambda: assert_equal(
remote_controller.execute(
'service corosync start && service pacemaker '
'restart')['exit_code'], 0,
'Corosync was not started, see debug log,'
' count-{0}'.format(count)), timeout=20)
# TODO(astudenov): add timeout_msg
_wait(
wait_pass(
lambda: assert_true(
_check_all_pcs_nodes_status(
ctrl_remotes, pcs_nodes_online, 'Online'),