Replace deprecated methods with their successors

Replace deprecated methods with their successors
Drop stdout_len and stderr_len as useless and unused

Change-Id: I558e864240c9d8bed7e9acd4dd1406c33d066800
Closes-bug: #1604688
(cherry picked from commit fadd06d)
Alexey Stepanov
2016-07-30 21:37:50 +03:00
parent 3c12547353
commit 2820a0965e
10 changed files with 197 additions and 220 deletions
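The whole commit follows one mechanical pattern: the deprecated _wait helper from devops is replaced with wait_pass, and hand-rolled try/except TimeoutError wrappers around wait() are collapsed into its timeout_msg argument. A minimal sketch of the before/after shape, assuming the devops.helpers.helpers signatures of the devops release this commit targets (the predicate and assertion below are hypothetical stand-ins):

from devops.helpers.helpers import wait, wait_pass
from proboscis.asserts import assert_equal

def check_api_group_enabled():
    # hypothetical predicate used only for illustration
    return True

# Before: wait() wrapped in try/except TimeoutError, plus the private _wait()
#   try:
#       wait(check_api_group_enabled, interval=10, timeout=60 * 20)
#   except TimeoutError:
#       raise TimeoutError('Failed to enable feature group')
#   _wait(lambda: assert_equal(2 * 2, 4, 'sanity'), timeout=60)

# After: wait() carries the failure text itself, wait_pass() replaces _wait()
wait(check_api_group_enabled, interval=10, timeout=60 * 20,
     timeout_msg='Failed to enable feature group')
wait_pass(lambda: assert_equal(2 * 2, 4, 'sanity'), timeout=60)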

View File

@@ -15,11 +15,12 @@ import hashlib
import json
import os
import re
from time import sleep
import traceback
import urllib2
from devops.error import TimeoutError
from devops.helpers.helpers import _wait
from devops.helpers.helpers import wait_pass
from devops.helpers.helpers import wait
import yaml
@@ -40,8 +41,6 @@ from netaddr import IPNetwork
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true
from time import sleep
@logwrap
def check_cinder_status(remote):
@@ -176,7 +175,7 @@ def check_ceph_image_size(remote, expected_size, device='vdc'):
if not ret:
logger.error("Partition not present! {}: ".format(
remote.check_call("df -m")))
raise Exception
raise Exception()
logger.debug("Partitions: {part}".format(part=ret))
assert_true(abs(float(ret[0].rstrip()) / float(expected_size) - 1) < 0.1,
"size {0} is not equal"
@@ -282,7 +281,8 @@ def enable_feature_group(env, group):
except (urllib2.HTTPError, urllib2.URLError):
return False
wait(check_api_group_enabled, interval=10, timeout=60 * 20)
wait(check_api_group_enabled, interval=10, timeout=60 * 20,
timeout_msg='Failed to enable feature group - {!r}'.format(group))
@logwrap
@@ -364,9 +364,10 @@ def check_mysql(remote, node_name):
except TimeoutError:
logger.error('MySQL daemon is down on {0}'.format(node_name))
raise
_wait(lambda: assert_equal(remote.execute(check_crm_cmd)['exit_code'], 0,
'MySQL resource is NOT running on {0}'.format(
node_name)), timeout=60)
wait_pass(lambda: assert_equal(
remote.execute(check_crm_cmd)['exit_code'], 0,
'MySQL resource is NOT running on {0}'.format(
node_name)), timeout=60)
try:
wait(lambda: ''.join(remote.execute(
check_galera_cmd)['stdout']).rstrip() == 'Synced', timeout=600)
@@ -540,10 +541,11 @@ def check_stats_on_collector(collector_remote, postgres_actions, master_uuid):
# Check that important data (clusters number, nodes number, nodes roles,
# user's email, used operation system, OpenStack stats) is saved correctly
for stat_type in general_stats.keys():
assert_true(type(summ_stats[stat_type]) == general_stats[stat_type],
"Installation structure in Collector's DB doesn't contain"
"the following stats: {0}".format(stat_type))
for stat_type in general_stats:
assert_true(
isinstance(summ_stats[stat_type], general_stats[stat_type]),
"Installation structure in Collector's DB doesn't contain"
"the following stats: {0}".format(stat_type))
real_clusters_number = int(postgres_actions.run_query(
db='nailgun', query='select count(*) from clusters;'))
@@ -599,7 +601,7 @@ def check_stats_private_info(collector_remote, postgres_actions,
_has_private_data = False
# Check that stats doesn't contain private data (e.g.
# specific passwords, settings, emails)
for _private in private_data.keys():
for _private in private_data:
_regex = r'(?P<key>"\S+"): (?P<value>[^:]*"{0}"[^:]*)'.format(
private_data[_private])
for _match in re.finditer(_regex, data):
@@ -615,7 +617,7 @@ def check_stats_private_info(collector_remote, postgres_actions,
_has_private_data = True
# Check that stats doesn't contain private types of data (e.g. any kind
# of passwords)
for _data_type in secret_data_types.keys():
for _data_type in secret_data_types:
_regex = (r'(?P<secret>"[^"]*{0}[^"]*": (\{{[^\}}]+\}}|\[[^\]+]\]|'
r'"[^"]+"))').format(secret_data_types[_data_type])

View File

@@ -17,6 +17,7 @@ import os
import posixpath
import re
import traceback
from warnings import warn
from devops.helpers.helpers import wait
from devops.models.node import SSHClient
@@ -162,11 +163,17 @@ class SSHManager(object):
"""
if assert_ec_equal is None:
assert_ec_equal = [0]
orig_result = self.execute(ip=ip, port=port, cmd=cmd)
remote = self._get_remote(ip=ip, port=port)
orig_result = remote.check_call(
command=cmd,
error_info=err_msg,
expected=assert_ec_equal,
raise_on_err=raise_on_assert
)
# Now create fallback result
# TODO(astepanov): switch to SSHClient output after tests adaptation
# TODO(astepanov): handle all parameters in SSHClient().check_call()
result = {
'stdout': orig_result['stdout'],
@@ -176,43 +183,8 @@ class SSHManager(object):
'stderr_str': ''.join(orig_result['stderr']).strip(),
}
details_log = (
"Host: {host}\n"
"Command: '{cmd}'\n"
"Exit code: {code}\n"
"STDOUT:\n{stdout}\n"
"STDERR:\n{stderr}".format(
host=ip, cmd=cmd, code=result['exit_code'],
stdout=result['stdout_str'], stderr=result['stderr_str']
))
if result['exit_code'] not in assert_ec_equal:
error_msg = (
err_msg or
"Unexpected exit_code returned: actual {0}, expected {1}."
"".format(
result['exit_code'],
' '.join(map(str, assert_ec_equal))))
log_msg = (
"{0} Command: '{1}' "
"Details:\n{2}".format(
error_msg, cmd, details_log))
logger.error(log_msg)
if raise_on_assert:
raise Exception(log_msg)
else:
logger.debug(details_log)
if jsonify:
try:
result['stdout_json'] = \
self._json_deserialize(result['stdout_str'])
except Exception:
error_msg = (
"Unable to deserialize output of command"
" '{0}' on host {1}".format(cmd, ip))
logger.error(error_msg)
raise Exception(error_msg)
result['stdout_json'] = orig_result.stdout_json
return result
@@ -223,6 +195,10 @@ class SSHManager(object):
:return: obj
:raise: Exception
"""
warn(
'_json_deserialize is not used anymore and will be removed later',
DeprecationWarning)
if isinstance(json_string, list):
json_string = ''.join(json_string).strip()
@@ -247,9 +223,9 @@ class SSHManager(object):
remote = self._get_remote(ip=ip, port=port)
return remote.download(destination, target)
def exist_on_remote(self, ip, path, port=22):
def exists_on_remote(self, ip, path, port=22):
remote = self._get_remote(ip=ip, port=port)
return remote.exist(path)
return remote.exists(path)
def isdir_on_remote(self, ip, path, port=22):
remote = self._get_remote(ip=ip, port=port)
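For reference, a hedged sketch of how the refactored helper reads from the caller's side; the function, IP and command below are hypothetical, and only the keys visible in this hunk (plus exit_code, referenced by the removed logging code) are assumed to remain once stdout_len and stderr_len are gone:

def show_kernel_version(ssh_manager, ip='10.109.0.2'):
    # ssh_manager is an already-initialized SSHManager instance (placeholder)
    result = ssh_manager.execute_on_remote(
        ip=ip,
        cmd='uname -r',
        err_msg='Failed to read the kernel version',
    )
    print(result['exit_code'])    # exit code reported by SSHClient.check_call()
    print(result['stdout_str'])   # joined and stripped stdout
    print(result['stderr_str'])   # joined and stripped stderr
    # result['stdout_json'] is only populated when jsonify=True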

View File

@@ -350,7 +350,7 @@ def cond_upload(remote, source, target, condition=''):
return 0
files_count = 0
for rootdir, subdirs, files in os.walk(source):
for rootdir, _, files in os.walk(source):
targetdir = os.path.normpath(
os.path.join(
target,
@@ -514,7 +514,9 @@ def get_network_template(template_name):
@logwrap
def get_net_settings(remote, skip_interfaces=set()):
def get_net_settings(remote, skip_interfaces=None):
if skip_interfaces is None:
skip_interfaces = set()
net_settings = dict()
interface_cmd = ('awk \'$1~/:/{split($1,iface,":"); print iface[1]}\''
' /proc/net/dev')
@@ -634,8 +636,7 @@ def get_node_hiera_roles(remote):
cmd = 'hiera roles'
roles = ''.join(run_on_remote(remote, cmd)).strip()
# Convert a string of roles like ["ceph-osd", "controller"] into a list
roles = map(lambda s: s.strip('" '), roles.strip("[]").split(','))
return roles
return [role.strip('" ') for role in roles.strip("[]").split(',')]
class RunLimit(object):

View File

@@ -258,7 +258,8 @@ class EnvironmentModel(object):
) % params
return keys
def get_target_devs(self, devops_nodes):
@staticmethod
def get_target_devs(devops_nodes):
return [
interface.target_dev for interface in [
val for var in map(lambda node: node.interfaces, devops_nodes)
@@ -331,10 +332,8 @@ class EnvironmentModel(object):
self.resume_environment()
def nailgun_nodes(self, devops_nodes):
return map(
lambda node: self.fuel_web.get_nailgun_node_by_devops_node(node),
devops_nodes
)
return [self.fuel_web.get_nailgun_node_by_devops_node(node)
for node in devops_nodes]
def check_slaves_are_ready(self):
devops_nodes = [node for node in self.d_env.nodes().slaves
@@ -356,7 +355,7 @@ class EnvironmentModel(object):
if not self.d_env.has_snapshot(name):
return False
logger.info('We have snapshot with such name: %s' % name)
logger.info('Snapshot with name {:s} exists'.format(name))
logger.info("Reverting the snapshot '{0}' ....".format(name))
self.d_env.revert(name)

View File

@@ -344,8 +344,8 @@ class FuelWebClient(object):
task = self.task_wait(task, timeout, interval)
assert_equal(
'error', task['status'],
"Task '{name}' has incorrect status. {} != {}".format(
task['status'], 'error', name=task["name"]
"Task '{name}' has incorrect status. {status} != {exp}".format(
status=task['status'], exp='error', name=task["name"]
)
)
@@ -376,17 +376,31 @@ class FuelWebClient(object):
def get_rabbit_running_nodes(self, ctrl_node):
ip = self.get_node_ip_by_devops_name(ctrl_node)
cmd = 'rabbitmqctl cluster_status'
rabbit_status = ''.join(
self.ssh_manager.execute(ip, cmd)['stdout']
).strip()
# If any rabbitmq node has failed, rabbitmqctl exits with code 70
# Acceptable list:
# 0 | EX_OK | Self-explanatory
# 69 | EX_UNAVAILABLE | Failed to connect to node
# 70 | EX_SOFTWARE | Any other error discovered when running command
# | | against live node
# 75 | EX_TEMPFAIL | Temporary failure (e.g. something timed out)
rabbit_status = self.ssh_manager.execute_on_remote(
ip, cmd, raise_on_assert=False, assert_ec_equal=[0, 69, 70, 75]
)['stdout_str']
rabbit_status = re.sub(r',\n\s*', ',', rabbit_status)
rabbit_nodes = re.search(
found_nodes = re.search(
"\{running_nodes,\[([^\]]*)\]\}",
rabbit_status).group(1).replace("'", "").split(',')
rabbit_status)
if not found_nodes:
logger.info(
'No running rabbitmq nodes found on {0}. Status:\n {1}'.format(
ctrl_node, rabbit_status))
return []
rabbit_nodes = found_nodes.group(1).replace("'", "").split(',')
logger.debug('rabbit nodes are {}'.format(rabbit_nodes))
nodes = [node.replace('rabbit@', "") for node in rabbit_nodes]
hostname_prefix = ''.join(self.ssh_manager.execute(
ip, 'hiera node_name_prefix_for_messaging')['stdout']).strip()
hostname_prefix = self.ssh_manager.execute_on_remote(
ip, 'hiera node_name_prefix_for_messaging', raise_on_assert=False
)['stdout_str']
if hostname_prefix not in ('', 'nil'):
nodes = [n.replace(hostname_prefix, "") for n in nodes]
return nodes
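A worked example of the parsing step above, using a made-up, line-wrapped rabbitmqctl cluster_status fragment (the sample output is assumed; the normalization and regex mirror the code in this hunk):

import re

sample = ("Cluster status of node 'rabbit@node-1' ...\n"
          " {running_nodes,['rabbit@node-1',\n"
          "                 'rabbit@node-2']},\n")

flat = re.sub(r',\n\s*', ',', sample)                       # join wrapped lines
found = re.search(r"\{running_nodes,\[([^\]]*)\]\}", flat)  # grab the node list
nodes = found.group(1).replace("'", "").split(',') if found else []
print([node.replace('rabbit@', '') for node in nodes])      # ['node-1', 'node-2']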
@@ -1094,17 +1108,15 @@ class FuelWebClient(object):
logger.info('Wait for task {0} seconds: {1}'.format(
timeout, pretty_log(task, indent=1)))
start = time.time()
try:
wait(
lambda: (self.client.get_task(task['id'])['status']
not in ('pending', 'running')),
interval=interval,
timeout=timeout
)
except TimeoutError:
raise TimeoutError(
"Waiting task \"{task}\" timeout {timeout} sec "
"was exceeded: ".format(task=task["name"], timeout=timeout))
wait(
lambda: (self.client.get_task(task['id'])['status']
not in ('pending', 'running')),
interval=interval,
timeout=timeout,
timeout_msg='Waiting task {0!r} timeout {1} sec '
'was exceeded'.format(task['name'], timeout))
took = time.time() - start
task = self.client.get_task(task['id'])
logger.info('Task finished. Took {0} seconds. {1}'.format(
@@ -1114,21 +1126,15 @@ class FuelWebClient(object):
@logwrap
def task_wait_progress(self, task, timeout, interval=5, progress=None):
try:
logger.info(
'start to wait with timeout {0} '
'interval {1}'.format(timeout, interval))
wait(
lambda: self.client.get_task(
task['id'])['progress'] >= progress,
interval=interval,
timeout=timeout
)
except TimeoutError:
raise TimeoutError(
"Waiting task \"{task}\" timeout {timeout} sec "
"was exceeded: ".format(task=task["name"], timeout=timeout))
logger.info('start to wait with timeout {0} '
'interval {1}'.format(timeout, interval))
wait(
lambda: self.client.get_task(
task['id'])['progress'] >= progress,
interval=interval,
timeout=timeout,
timeout_msg='Waiting task {0!r} timeout {1} sec '
'was exceeded'.format(task["name"], timeout))
return self.client.get_task(task['id'])
@logwrap
@@ -1186,7 +1192,7 @@ class FuelWebClient(object):
self.client.update_nodes(nodes_data)
nailgun_nodes = self.client.list_cluster_nodes(cluster_id)
cluster_node_ids = map(lambda _node: str(_node['id']), nailgun_nodes)
cluster_node_ids = [str(_node['id']) for _node in nailgun_nodes]
assert_true(
all([node_id in cluster_node_ids for node_id in node_ids]))
@@ -1229,7 +1235,7 @@ class FuelWebClient(object):
interfaces_dict[iface].append('fuelweb_admin')
def get_iface_by_name(ifaces, name):
iface = filter(lambda iface: iface['name'] == name, ifaces)
iface = [_iface for _iface in ifaces if _iface['name'] == name]
assert_true(len(iface) > 0,
"Interface with name {} is not present on "
"node. Please check override params.".format(name))
@@ -1429,7 +1435,7 @@ class FuelWebClient(object):
net.get('seg_type', '') == 'tun'):
result['private_tun'] = net
elif (net['name'] == 'private' and
net.get('seg_type', '') == 'gre'):
net.get('seg_type', '') == 'gre'):
result['private_gre'] = net
elif net['name'] == 'public':
result['public'] = net
@@ -1553,7 +1559,8 @@ class FuelWebClient(object):
networks=new_settings["networks"]
)
def _get_true_net_name(self, name, net_pools):
@staticmethod
def _get_true_net_name(name, net_pools):
"""Find a devops network name in net_pools"""
for net in net_pools:
if name in net:
@@ -1671,7 +1678,8 @@ class FuelWebClient(object):
else:
net_config['ip_ranges'] = self.get_range(ip_network, -1)
def get_range(self, ip_network, ip_range=0):
@staticmethod
def get_range(ip_network, ip_range=0):
net = list(netaddr.IPNetwork(str(ip_network)))
half = len(net) / 2
if ip_range == 0:
@@ -1795,7 +1803,7 @@ class FuelWebClient(object):
cmd = 'ip netns exec {0} ip -4 ' \
'-o address show {1}'.format(namespace, interface)
else:
cmd = 'ip -4 -o address show {1}'.format(interface)
cmd = 'ip -4 -o address show {0}'.format(interface)
with self.get_ssh_for_node(node_name) as remote:
ret = remote.check_call(cmd)
@@ -1939,7 +1947,7 @@ class FuelWebClient(object):
test_path = map_ostf.OSTF_TEST_MAPPING.get(test_name_to_run)
logger.info('Test path is {0}'.format(test_path))
for i in range(0, retries):
for _ in range(retries):
result = self.run_single_ostf_test(
cluster_id=cluster_id, test_sets=['smoke', 'sanity'],
test_name=test_path,
@@ -1971,8 +1979,8 @@ class FuelWebClient(object):
@logwrap
def run_ceph_task(self, cluster_id, offline_nodes):
ceph_id = [n['id'] for n in self.client.list_cluster_nodes(cluster_id)
if 'ceph-osd'
in n['roles'] and n['id'] not in offline_nodes]
if 'ceph-osd' in n['roles'] and
n['id'] not in offline_nodes]
res = self.client.put_deployment_tasks_for_cluster(
cluster_id, data=['top-role-ceph-osd'],
node_id=str(ceph_id).strip('[]'))
@@ -2021,7 +2029,8 @@ class FuelWebClient(object):
"on node %s", fqdn)
ceph.restart_monitor(remote_to_mon)
wait(lambda: not ceph.is_clock_skew(remote), timeout=120)
wait(lambda: not ceph.is_clock_skew(remote), timeout=120,
timeout_msg='check ceph time skew timeout')
@logwrap
def check_ceph_status(self, cluster_id, offline_nodes=(),
@@ -2035,14 +2044,11 @@ class FuelWebClient(object):
for node in online_ceph_nodes:
with self.environment.d_env\
.get_ssh_to_remote(node['ip']) as remote:
try:
wait(lambda: ceph.check_service_ready(remote) is True,
interval=20, timeout=600)
except TimeoutError:
error_msg = 'Ceph service is not properly started' \
' on {0}'.format(node['name'])
logger.error(error_msg)
raise TimeoutError(error_msg)
wait(lambda: ceph.check_service_ready(remote) is True,
interval=20, timeout=600,
timeout_msg='Ceph service is not properly started'
' on {0}'.format(node['name']))
logger.info('Ceph service is ready. Checking Ceph Health...')
self.check_ceph_time_skew(cluster_id, offline_nodes)
@@ -2226,8 +2232,9 @@ class FuelWebClient(object):
'Cidr after deployment is not equal'
' to cidr by default')
@staticmethod
@logwrap
def check_fixed_nova_splited_cidr(self, os_conn, nailgun_cidr):
def check_fixed_nova_splited_cidr(os_conn, nailgun_cidr):
logger.debug('Nailgun cidr for nova: {0}'.format(nailgun_cidr))
subnets_list = [net.cidr for net in os_conn.get_nova_network_list()]
@@ -2318,13 +2325,12 @@ class FuelWebClient(object):
fqdn, self.environment.d_env.nodes().slaves)
return devops_node
@staticmethod
@logwrap
def get_fqdn_by_hostname(self, hostname):
if DNS_SUFFIX not in hostname:
hostname += DNS_SUFFIX
return hostname
else:
return hostname
def get_fqdn_by_hostname(hostname):
return (
hostname + DNS_SUFFIX if DNS_SUFFIX not in hostname else hostname
)
def get_nodegroup(self, cluster_id, name='default', group_id=None):
ngroups = self.client.get_nodegroups()
@@ -2444,8 +2450,9 @@ class FuelWebClient(object):
plugin_data[path[-1]] = value
self.client.update_cluster_attributes(cluster_id, attr)
@staticmethod
@logwrap
def prepare_ceph_to_delete(self, remote_ceph):
def prepare_ceph_to_delete(remote_ceph):
hostname = ''.join(remote_ceph.execute(
"hostname -s")['stdout']).strip()
osd_tree = ceph.get_osd_tree(remote_ceph)
@@ -2460,7 +2467,8 @@ class FuelWebClient(object):
for osd_id in ids:
remote_ceph.execute("ceph osd out {}".format(osd_id))
wait(lambda: ceph.is_health_ok(remote_ceph),
interval=30, timeout=10 * 60)
interval=30, timeout=10 * 60,
timeout_msg='ceph health ok timeout')
for osd_id in ids:
if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
remote_ceph.execute("stop ceph-osd id={}".format(osd_id))
@@ -2564,9 +2572,9 @@ class FuelWebClient(object):
@logwrap
def spawn_vms_wait(self, cluster_id, timeout=60 * 60, interval=30):
logger.info('Spawn VMs of a cluster %s', cluster_id)
task = self.client.spawn_vms(cluster_id)
self.assert_task_success(task, timeout=timeout, interval=interval)
logger.info('Spawn VMs of a cluster %s', cluster_id)
task = self.client.spawn_vms(cluster_id)
self.assert_task_success(task, timeout=timeout, interval=interval)
@logwrap
def get_all_ostf_set_names(self, cluster_id):

View File

@@ -148,19 +148,16 @@ class RhBase(TestBasic):
)
if settings.RH_SERVER_URL:
reg_command = reg_command + " --serverurl={0}".format(
settings.RH_SERVER_URL)
reg_command += " --serverurl={0}".format(settings.RH_SERVER_URL)
if settings.RH_REGISTERED_ORG_NAME:
reg_command = reg_command + " --org={0}".format(
settings.RH_REGISTERED_ORG_NAME)
reg_command += " --org={0}".format(settings.RH_REGISTERED_ORG_NAME)
if settings.RH_RELEASE:
reg_command = reg_command + " --release={0}".format(
settings.RH_RELEASE)
reg_command += " --release={0}".format(settings.RH_RELEASE)
if settings.RH_ACTIVATION_KEY:
reg_command = reg_command + " --activationkey={0}".format(
reg_command += " --activationkey={0}".format(
settings.RH_ACTIVATION_KEY)
if settings.RH_POOL_HASH:

View File

@@ -14,7 +14,7 @@
import time
from devops.error import TimeoutError
from devops.helpers.helpers import _wait
from devops.helpers.helpers import wait_pass
from devops.helpers.helpers import tcp_ping
from devops.helpers.helpers import wait
from proboscis.asserts import assert_equal
@@ -167,17 +167,17 @@ class CICMaintenanceMode(TestBasic):
self.fuel_web.wait_cinder_is_up(
[n.name for n in d_ctrls])
_wait(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],
test_name=map_ostf.OSTF_TEST_MAPPING.get(
'Check that required services are running')),
timeout=1500)
wait_pass(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],
test_name=map_ostf.OSTF_TEST_MAPPING.get(
'Check that required services are running')),
timeout=1500)
logger.debug("Required services are running")
_wait(lambda:
self.fuel_web.run_ostf(cluster_id, test_sets=['ha']),
timeout=1500)
wait_pass(lambda:
self.fuel_web.run_ostf(cluster_id, test_sets=['ha']),
timeout=1500)
logger.debug("HA tests are pass now")
try:
@@ -296,17 +296,17 @@ class CICMaintenanceMode(TestBasic):
self.fuel_web.wait_cinder_is_up(
[n.name for n in d_ctrls])
_wait(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],
test_name=map_ostf.OSTF_TEST_MAPPING.get(
'Check that required services are running')),
timeout=1500)
wait_pass(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],
test_name=map_ostf.OSTF_TEST_MAPPING.get(
'Check that required services are running')),
timeout=1500)
logger.debug("Required services are running")
_wait(lambda:
self.fuel_web.run_ostf(cluster_id, test_sets=['ha']),
timeout=1500)
wait_pass(lambda:
self.fuel_web.run_ostf(cluster_id, test_sets=['ha']),
timeout=1500)
logger.debug("HA tests are pass now")
try:
@@ -470,17 +470,17 @@ class CICMaintenanceMode(TestBasic):
self.fuel_web.wait_cinder_is_up(
[n.name for n in d_ctrls])
_wait(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],
test_name=map_ostf.OSTF_TEST_MAPPING.get(
'Check that required services are running')),
timeout=1500)
wait_pass(lambda:
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['sanity'],
test_name=map_ostf.OSTF_TEST_MAPPING.get(
'Check that required services are running')),
timeout=1500)
logger.debug("Required services are running")
_wait(lambda:
self.fuel_web.run_ostf(cluster_id, test_sets=['ha']),
timeout=1500)
wait_pass(lambda:
self.fuel_web.run_ostf(cluster_id, test_sets=['ha']),
timeout=1500)
logger.debug("HA tests are pass now")
try:

View File

@@ -16,7 +16,7 @@ import re
import time
from devops.error import TimeoutError
from devops.helpers.helpers import _wait
from devops.helpers.helpers import wait_pass
from devops.helpers.helpers import tcp_ping
from devops.helpers.helpers import wait
from fuelweb_test.helpers.utils import RunLimit
@@ -27,14 +27,14 @@ from proboscis.asserts import assert_true
from proboscis import SkipTest
import yaml
from fuelweb_test import logger
from fuelweb_test import logwrap
from fuelweb_test.helpers import os_actions
from fuelweb_test.helpers.checkers import check_mysql
from fuelweb_test.helpers.checkers import check_ping
from fuelweb_test.helpers.checkers import check_public_ping
from fuelweb_test.helpers.checkers import get_file_size
from fuelweb_test.helpers import os_actions
from fuelweb_test.helpers.utils import TimeStat
from fuelweb_test import logger
from fuelweb_test import logwrap
from fuelweb_test.helpers.utils import run_on_remote
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import DNS
@@ -122,7 +122,7 @@ class TestHaFailoverBase(TestBasic):
def ha_destroy_controllers(self):
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
def get_needed_controllers(cluster_id):
n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
@@ -196,7 +196,7 @@ class TestHaFailoverBase(TestBasic):
def ha_disconnect_controllers(self):
if not self.env.revert_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
cluster_id = self.fuel_web.client.get_cluster_id(
self.__class__.__name__)
@@ -224,7 +224,7 @@ class TestHaFailoverBase(TestBasic):
def ha_delete_vips(self):
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
logger.debug('Start reverting of {0} snapshot'
.format(self.snapshot_name))
@@ -299,11 +299,9 @@ class TestHaFailoverBase(TestBasic):
# 3. Waiting for restore the IP
logger.debug("Waiting while deleted ip restores ...")
try:
wait(check_restore, timeout=60)
except TimeoutError as e:
logger.error("Resource has not been restored for a 60 sec")
raise e
wait(check_restore, timeout=60,
timeout_msg='Resource has not been restored for a 60 sec')
new_nodes = self.fuel_web.get_pacemaker_resource_location(
devops_controllers[0].name,
@@ -328,7 +326,7 @@ class TestHaFailoverBase(TestBasic):
def ha_mysql_termination(self):
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
self.env.revert_snapshot(self.snapshot_name)
cluster_id = self.fuel_web.client.get_cluster_id(
@@ -362,7 +360,7 @@ class TestHaFailoverBase(TestBasic):
def ha_haproxy_termination(self):
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
self.env.revert_snapshot(self.snapshot_name)
@@ -409,7 +407,7 @@ class TestHaFailoverBase(TestBasic):
def ha_pacemaker_configuration(self):
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
self.env.revert_snapshot(self.snapshot_name)
@@ -453,7 +451,7 @@ class TestHaFailoverBase(TestBasic):
def ha_pacemaker_restart_heat_engine(self):
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
self.env.revert_snapshot(self.snapshot_name)
ocf_success = "DEBUG: OpenStack Orchestration Engine" \
@@ -489,7 +487,9 @@ class TestHaFailoverBase(TestBasic):
remote.execute("iptables -I OUTPUT 1 -m owner --uid-owner heat -m"
" state --state NEW,ESTABLISHED,RELATED -j DROP")
cmd = "netstat -nap | grep {0} | grep :5673".format(pid)
wait(lambda: len(remote.execute(cmd)['stdout']) == 0, timeout=300)
wait(lambda: len(remote.execute(cmd)['stdout']) == 0, timeout=300,
timeout_msg='Failed to drop AMQP connections on node {}'
''.format(p_d_ctrl.name))
get_ocf_status = ''.join(
remote.execute(ocf_status)['stdout']).rstrip()
@@ -502,7 +502,7 @@ class TestHaFailoverBase(TestBasic):
with self.fuel_web.get_ssh_for_node(p_d_ctrl.name) as remote:
remote.execute("iptables -D OUTPUT 1 -m owner --uid-owner heat -m"
" state --state NEW,ESTABLISHED,RELATED")
_wait(lambda: assert_true(ocf_success in ''.join(
wait_pass(lambda: assert_true(ocf_success in ''.join(
remote.execute(ocf_status)['stdout']).rstrip()), timeout=240)
newpid = ''.join(remote.execute('pgrep {0}'
.format(heat_name))['stdout'])
@@ -524,7 +524,7 @@ class TestHaFailoverBase(TestBasic):
def ha_check_monit(self):
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
self.env.revert_snapshot(self.snapshot_name)
cluster_id = self.fuel_web.client.get_cluster_id(
@@ -550,7 +550,7 @@ class TestHaFailoverBase(TestBasic):
def check_firewall_vulnerability(self):
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
self.env.revert_snapshot(self.snapshot_name)
cluster_id = self.fuel_web.get_last_created_cluster()
@@ -558,7 +558,7 @@ class TestHaFailoverBase(TestBasic):
def check_virtual_router(self):
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
self.env.revert_snapshot(self.snapshot_name)
cluster_id = self.fuel_web.get_last_created_cluster()
@@ -580,13 +580,9 @@ class TestHaFailoverBase(TestBasic):
DOWNLOAD_LINK))
with self.fuel_web.get_ssh_for_node('slave-05') as remote:
try:
wait(
lambda: remote.execute("ls -1 {0}/{1}".format(
file_path, file_name))['exit_code'] == 0, timeout=60)
except TimeoutError:
raise TimeoutError(
"File download was not started")
wait(lambda: remote.execute("ls -1 {0}/{1}".format(
file_path, file_name))['exit_code'] == 0, timeout=60,
timeout_msg='File download was not started')
with self.fuel_web.get_ssh_for_node('slave-05') as remote:
file_size1 = get_file_size(remote, file_name, file_path)
@@ -622,7 +618,7 @@ class TestHaFailoverBase(TestBasic):
def ha_controller_loss_packages(self, dev='br-mgmt', loss_percent='0.05'):
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
self.env.revert_snapshot(self.snapshot_name)
@@ -643,8 +639,8 @@ class TestHaFailoverBase(TestBasic):
remote.check_call(cmd_input)
remote.check_call(cmd_output)
except:
logger.error('command failed to be executed'.format(
p_d_ctrl.name))
logger.error(
    'Commands failed to be executed on node {:s}'.format(p_d_ctrl.name))
raise
finally:
remote.clear()
@@ -667,7 +663,7 @@ class TestHaFailoverBase(TestBasic):
def ha_sequential_rabbit_master_failover(self):
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
self.env.revert_snapshot(self.snapshot_name)
@@ -707,11 +703,9 @@ class TestHaFailoverBase(TestBasic):
floating_ip = os_conn.assign_floating_ip(instance)
# check instance
try:
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
except TimeoutError:
raise TimeoutError('Can not ping instance'
' by floating ip {0}'.format(floating_ip.ip))
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120,
timeout_msg='Can not ping instance'
' by floating ip {0}'.format(floating_ip.ip))
p_d_ctrl = self.fuel_web.get_nailgun_primary_node(
self.env.d_env.nodes().slaves[0])
@@ -742,11 +736,9 @@ class TestHaFailoverBase(TestBasic):
test_sets=['ha'], should_fail=3)
# check instance
try:
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
except TimeoutError:
raise TimeoutError('Can not ping instance'
' by floating ip {0}'.format(floating_ip.ip))
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120,
timeout_msg='Can not ping instance'
' by floating ip {0}'.format(floating_ip.ip))
n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
cluster_id, ['controller'])
@@ -824,7 +816,9 @@ class TestHaFailoverBase(TestBasic):
test_sets=['ha'])
# ping instance
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120,
timeout_msg='Can not ping instance'
' by floating ip {0}'.format(floating_ip.ip))
# delete instance
os_conn = os_actions.OpenStackActions(public_vip)
@@ -844,7 +838,7 @@ class TestHaFailoverBase(TestBasic):
def check_alive_rabbit_node_not_kicked(self):
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
self.env.revert_snapshot(self.snapshot_name)
@@ -925,7 +919,7 @@ class TestHaFailoverBase(TestBasic):
def check_dead_rabbit_node_kicked(self):
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
self.env.revert_snapshot(self.snapshot_name)
@@ -1005,7 +999,7 @@ class TestHaFailoverBase(TestBasic):
def test_3_1_rabbit_failover(self):
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
logger.info('Revert environment started...')
self.env.revert_snapshot(self.snapshot_name)
@@ -1185,7 +1179,7 @@ class TestHaFailoverBase(TestBasic):
return True
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
self.env.revert_snapshot(self.snapshot_name)
p_d_ctrl = self.fuel_web.get_nailgun_primary_node(
@@ -1210,27 +1204,27 @@ class TestHaFailoverBase(TestBasic):
for count in xrange(500):
logger.debug('Checking splitbrain in the loop, '
'count number: {0}'.format(count))
_wait(
wait_pass(
lambda: assert_equal(
remote_controller.execute(
'killall -TERM corosync')['exit_code'], 0,
'Corosync was not killed on controller, '
'see debug log, count-{0}'.format(count)), timeout=20)
_wait(
wait_pass(
lambda: assert_true(
_check_all_pcs_nodes_status(
live_remotes, [controller_node['fqdn']],
'Offline'),
'Caught splitbrain, see debug log, '
'count-{0}'.format(count)), timeout=20)
_wait(
wait_pass(
lambda: assert_equal(
remote_controller.execute(
'service corosync start && service pacemaker '
'restart')['exit_code'], 0,
'Corosync was not started, see debug log,'
' count-{0}'.format(count)), timeout=20)
_wait(
wait_pass(
lambda: assert_true(
_check_all_pcs_nodes_status(
ctrl_remotes, pcs_nodes_online, 'Online'),
@@ -1301,7 +1295,7 @@ class TestHaFailoverBase(TestBasic):
def ha_rabbitmq_stability_check(self):
if not self.env.d_env.has_snapshot(self.snapshot_name):
raise SkipTest()
raise SkipTest('Snapshot {} not found'.format(self.snapshot_name))
logger.info('Revert environment started...')
self.show_step(1, initialize=True)
self.env.revert_snapshot(self.snapshot_name)

View File

@@ -70,7 +70,7 @@ def replace_fuel_agent_rpm(environment):
pack_path)
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0,
('Failed to update package {}').format(result))
'Failed to update package {}'.format(result))
except Exception as e:
logger.error("Could not upload package {e}".format(e=e))
@@ -127,7 +127,7 @@ def patch_centos_bootstrap(environment):
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0,
('Failed to rebuild bootstrap {}').format(result))
'Failed to rebuild bootstrap {}'.format(result))
except Exception as e:
logger.error("Could not upload package {e}".format(e=e))
raise
@@ -150,7 +150,7 @@ def patch_and_assemble_ubuntu_bootstrap(environment):
# renew code in bootstrap
# Step 1 - install squashfs-tools
cmd = ("yum install -y squashfs-tools")
cmd = "yum install -y squashfs-tools"
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0,
('Failed to install squashfs-tools {}'

View File

@@ -73,4 +73,4 @@ class DeployWithPluginExampleV3(ActionsBase):
@factory
def cases():
return (case_factory(DeployWithPluginExampleV3))
return case_factory(DeployWithPluginExampleV3)