Refactoring functions in checkers.py (part 1)

The following functions were changed:
  * check_update_network_data_over_cli
  * check_get_network_data_over_cli
  * check_offload
  * check_client_smoke
  * check_hiera_hosts
  * check_log_lines_order
  * check_file_exists
  * check_cobbler_node_exists
  * check_haproxy_backend
  * check_public_ping
  * check_repo_managment
  * check_cinder_status
  * verify_service

Related tests are also modified.
Some typos are fixed.
Implements: blueprint sshmanager-integration

Change-Id: Ifdc2a01d100dbf3b809f6619d9877a8c98db3258
Vasily Gorin 2016-02-24 17:42:23 +03:00
parent d5c987924a
commit 0af42bd0f8
21 changed files with 303 additions and 331 deletions
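In short: each refactored checker now takes a node IP instead of an open SSHClient and routes commands through the module-level SSHManager instance. A minimal before/after sketch of the calling convention (the IP value and import path are illustrative assumptions, not part of this diff):

# Before: callers opened and managed the SSH connection themselves.
#     with self.env.d_env.get_ssh_to_remote(node_ip) as remote:
#         result = remote.execute('cinder service-list')
# After: callers pass the IP; SSHManager owns the connection life cycle.
from fuelweb_test.helpers.ssh_manager import SSHManager  # assumed import path

ssh_manager = SSHManager()
result = ssh_manager.execute(ip='10.109.0.2',  # hypothetical node IP
                             cmd='cinder service-list')
print(result['exit_code'], ''.join(result['stdout']))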

View File

@@ -44,15 +44,21 @@ from proboscis.asserts import assert_true
from time import sleep
ssh_manager = SSHManager()
@logwrap
def check_cinder_status(remote):
def check_cinder_status(ip):
"""Parse output and return False if any enabled service is down.
'cinder service-list' stdout example:
| cinder-scheduler | node-1.test.domain.local | nova | enabled | up |
| cinder-scheduler | node-2.test.domain.local | nova | enabled | down |
"""
cmd = '. openrc; cinder service-list'
result = remote.execute(cmd)
result = ssh_manager.execute(
ip=ip,
cmd=cmd
)
cinder_services = ''.join(result['stdout'])
logger.debug('>$ cinder service-list\n{}'.format(cinder_services))
if result['exit_code'] == 0:
@@ -87,9 +93,12 @@ def check_image(image, md5, path):
@logwrap
def verify_service(remote, service_name, count=1,
def verify_service(ip, service_name, count=1,
ignore_count_of_proccesses=False):
ps_output = remote.execute('ps ax')['stdout']
ps_output = ssh_manager.execute_on_remote(
ip=ip,
cmd='ps ax'
)['stdout']
api = filter(lambda x: service_name in x, ps_output)
logger.debug("{} \\n: {}".format(service_name, str(api)))
if not ignore_count_of_proccesses:
@@ -237,12 +246,10 @@ def check_archive_type(tar_path):
@logwrap
def check_file_exists(node_ssh, path):
result = node_ssh.execute('test -e "{0}"'.format(path))
assert_equal(result['exit_code'],
0,
'Can not find {0}'.format(path))
logger.info('File {0} exists on {1}'.format(path, node_ssh.host))
def check_file_exists(ip, path):
assert_true(ssh_manager.exists_on_remote(ip, path),
'Can not find {0}'.format(path))
logger.info('File {0} exists on {1}'.format(path, ip))
@logwrap
@@ -1028,46 +1035,52 @@ def is_ntpd_active(remote, ntpd_ip):
return not remote.execute(cmd)['exit_code']
def check_repo_managment(remote):
def check_repo_managment(ip):
"""Check repo management
run 'yum -y clean all && yum check-update' or
'apt-get clean all && apt-get update'; the exit code should be 0
:type remote: SSHClient
:type ip: node ip
:rtype Dict
"""
if OPENSTACK_RELEASE == OPENSTACK_RELEASE_UBUNTU:
cmd = "apt-get clean all && apt-get update > /dev/null"
else:
cmd = "yum -y clean all && yum check-update > /dev/null"
remote.check_call(cmd)
ssh_manager.execute_on_remote(
ip=ip,
cmd=cmd
)
def check_public_ping(remote):
def check_public_ping(ip):
""" Check if ping public vip
:type remote: SSHClient object
:type ip: node ip
"""
cmd = ('ruby /etc/puppet/modules/osnailyfacter/'
'modular/virtual_ips/public_vip_ping_post.rb')
res = remote.execute(cmd)
assert_equal(0, res['exit_code'],
'Public ping check failed:'
' {0}'.format(res))
ssh_manager.execute_on_remote(
ip=ip,
cmd=cmd,
err_msg='Public ping check failed'
)
def check_cobbler_node_exists(remote, node_id):
def check_cobbler_node_exists(ip, node_id):
"""Check node with following node_id
is present in the cobbler node list
:param remote: SSHClient
:param ip: node ip
:param node_id: fuel node id
:return: bool: True if exit code of command (node) == 0
"""
logger.debug("Check that cluster contains node with ID:{0} ".
format(node_id))
node = remote.execute(
'bash -c "cobbler system list" | grep '
'-w "node-{0}"'.format(node_id))
node = ssh_manager.execute(
ip=ip,
cmd='bash -c "cobbler system list" | grep '
'-w "node-{0}"'.format(node_id)
)
return int(node['exit_code']) == 0
@@ -1079,7 +1092,7 @@ def check_cluster_presence(cluster_id, postgres_actions):
return str(cluster_id) in query_result
def check_haproxy_backend(remote,
def check_haproxy_backend(ip,
services=None, nodes=None,
ignore_services=None, ignore_nodes=None):
"""Check DOWN state of HAProxy backends. Define names of service or nodes
@@ -1087,7 +1100,7 @@ def check_haproxy_backend(remote,
service status on all nodes. Use ignore_nodes to ignore all services on
all nodes. Ignoring takes higher priority.
:type remote: SSHClient
:type ip: node ip
:type services: List
:type nodes: List
:type ignore_services: List
@@ -1102,39 +1115,35 @@ def check_haproxy_backend(remote,
grep.extend(
['|egrep -v "{}"'.format('|'.join(n)) for n in negative_filter if n])
return remote.execute("{}{}".format(cmd, ''.join(grep)))
result = ssh_manager.execute(
ip=ip,
cmd="{}{}".format(cmd, ''.join(grep))
)
return result
def check_log_lines_order(remote, log_file_path, line_matcher):
def check_log_lines_order(ip, log_file_path, line_matcher):
"""Read log file and check that lines order are same as strings in list
:param remote: SSHClient
:param ip: ip of node in str format
:param log_file_path: path to log file
:param line_matcher: list of strings to search
"""
check_file_exists(remote, path=log_file_path)
check_file_exists(ip, path=log_file_path)
previous_line_pos = 1
previous_line = None
for current_line in line_matcher:
cmd = 'tail -n +{0} {1} | grep -n "{2}"'\
.format(previous_line_pos, log_file_path, current_line)
result = remote.execute(cmd)
# line not found case
assert_equal(0,
result['exit_code'],
"Line '{0}' not found after line '{1}' in the file '{2}'."
" Command '{3}' executed with exit_code='{4}'\n"
"stdout:\n* {5} *\n"
"stderr:\n'* {6} *\n"
.format(current_line,
previous_line,
log_file_path,
cmd,
result['exit_code'],
'\n'.join(result['stdout']),
'\n'.join(result['stderr'])))
result = ssh_manager.execute_on_remote(
ip=ip,
cmd=cmd,
err_msg="Line '{0}' not found after line '{1}' in the file "
"'{2}'.".format(current_line, previous_line, log_file_path)
)
# more than one line found case
assert_equal(1,
@@ -1157,54 +1166,68 @@ def check_log_lines_order(remote, log_file_path, line_matcher):
previous_line = current_line
def check_hiera_hosts(self, nodes, cmd):
def check_hiera_hosts(nodes, cmd):
hiera_hosts = []
for node in nodes:
with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
hosts = ''.join(run_on_remote(remote, cmd)).strip().split(',')
logger.debug("hosts on {0} are {1}".format(node['hostname'],
hosts))
if not hiera_hosts:
hiera_hosts = hosts
continue
else:
assert_true(set(hosts) == set(hiera_hosts),
'Hosts on node {0} differ from'
' others'.format(node['hostname']))
result = ssh_manager.execute_on_remote(
ip=node['ip'],
cmd=cmd
)['stdout']
hosts = ''.join(result).strip().split(',')
logger.debug("hosts on {0} are {1}".format(node['hostname'], hosts))
if not hiera_hosts:
hiera_hosts = hosts
continue
else:
assert_true(set(hosts) == set(hiera_hosts),
'Hosts on node {0} differ from'
' others'.format(node['hostname']))
def check_client_smoke(remote):
fuel_output = remote.execute(
'fuel env list')['stdout'][2].split('|')[2].strip()
fuel_2_output = remote.execute(
'fuel2 env list')['stdout'][3].split('|')[3].strip()
def check_client_smoke(ip):
fuel_output = ssh_manager.execute(
ip=ip,
cmd='fuel env list'
)['stdout'][2].split('|')[2].strip()
fuel_2_output = ssh_manager.execute(
ip=ip,
cmd='fuel2 env list'
)['stdout'][3].split('|')[3].strip()
assert_equal(fuel_output, fuel_2_output,
"The fuel: {0} and fuel2: {1} outputs are not equal"
.format(fuel_output, fuel_2_output))
def check_offload(node, interface, offload_type):
command = "ethtool --show-offload %s | awk '/%s/ {print $2}'"
offload_status = node.execute(command % (interface, offload_type))
assert_equal(offload_status['exit_code'], 0,
"Failed to get Offload {0} "
"on node {1}".format(offload_type, node))
return ''.join(node.execute(
command % (interface, offload_type))['stdout']).rstrip()
def check_offload(ip, interface, offload_type):
command = "ethtool --show-offload {0} |" \
" awk '/{1}/ {{print $2}}'".format(interface, offload_type)
result = ssh_manager.execute_on_remote(
ip=ip,
cmd=command,
err_msg="Failed to get Offload {0} "
"on node {1}".format(offload_type, ip)
)
return ''.join(result['stdout']).rstrip()
def check_get_network_data_over_cli(remote, cluster_id, path):
def check_get_network_data_over_cli(ip, cluster_id, path):
logger.info("Download network data over cli")
cmd = 'fuel --debug --env {0} network --dir {1} --json -d'.format(
cluster_id, path)
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0,
'Failed to download network data {0}'.format(result))
ssh_manager.execute_on_remote(
ip=ip,
cmd=cmd,
err_msg='Failed to download network data'
)
def check_update_network_data_over_cli(remote, cluster_id, path):
def check_update_network_data_over_cli(ip, cluster_id, path):
logger.info("Upload network data over cli")
cmd = 'fuel --debug --env {0} network --dir {1} --json -u'.format(
cluster_id, path)
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0,
'Failed to upload network data {0}'.format(result))
ssh_manager.execute_on_remote(
ip=ip,
cmd=cmd,
err_msg='Failed to upload network data'
)
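The refactored checkers above rely on two SSHManager calls with different failure semantics: execute() returns the raw result dict for the caller to inspect, while execute_on_remote() replaces the old per-call assert_equal boilerplate by failing on a non-zero exit code and accepting an err_msg. A sketch of the pattern as inferred from these call sites (not the canonical SSHManager implementation):

ip = '10.109.0.2'  # hypothetical node IP
# execute(): no implicit assertion; the caller checks the exit code.
result = ssh_manager.execute(ip=ip, cmd='cobbler system list')
node_present = result['exit_code'] == 0
# execute_on_remote(): fails the test on a non-zero exit code, so the
# old assert_equal(result['exit_code'], 0, ...) calls can be dropped.
ssh_manager.execute_on_remote(ip=ip,
                              cmd='apt-get clean all && apt-get update',
                              err_msg='Repo management check failed')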

View File

@@ -485,8 +485,7 @@ def check_repos_management(func):
for n in nailgun_nodes:
logger.debug("Check repository management on {0}"
.format(n['ip']))
with env.d_env.get_ssh_to_remote(n['ip']) as node_ssh:
check_repo_managment(node_ssh)
check_repo_managment(n['ip'])
except Exception:
logger.error("An error happened during check repositories "
"management on nodes. Please see the debug log.")

View File

@@ -222,9 +222,9 @@ class SSHManager(object):
remote = self._get_remote(ip=ip, port=port)
return remote.download(destination, target)
def exist_on_remote(self, ip, path, port=22):
def exists_on_remote(self, ip, path, port=22):
remote = self._get_remote(ip=ip, port=port)
return remote.exist(path)
return remote.exists(path)
def isdir_on_remote(self, ip, path, port=22):
remote = self._get_remote(ip=ip, port=port)
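The exist_on_remote -> exists_on_remote rename keeps the manager's method name in line with the underlying remote.exists() call, and check_file_exists() in checkers.py now delegates to it. Illustrative usage (IP and path are hypothetical):

# Returns True if the path exists on the target node, False otherwise.
assert_true(ssh_manager.exists_on_remote('10.109.0.2', '/root/bigfile'),
            'Can not find /root/bigfile')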

View File

@@ -1925,15 +1925,15 @@ class FuelWebClient(object):
def wait_cinder_is_up(self, node_names):
logger.info("Waiting for all Cinder services up.")
for node_name in node_names:
with self.get_ssh_for_node(node_name) as remote:
try:
wait(lambda: checkers.check_cinder_status(remote),
timeout=300)
logger.info("All Cinder services up.")
except TimeoutError:
logger.error("Cinder services not ready.")
raise TimeoutError(
"Cinder services not ready. ")
node = self.get_nailgun_node_by_name(node_name)
try:
wait(lambda: checkers.check_cinder_status(node['ip']),
timeout=300)
logger.info("All Cinder services up.")
except TimeoutError:
logger.error("Cinder services not ready.")
raise TimeoutError(
"Cinder services not ready. ")
return True
def run_ostf_repeatably(self, cluster_id, test_name=None,
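Since check_cinder_status() now needs only an IP, the polling loop above no longer holds an SSH session open for the whole wait. A condensed sketch of the pattern, assuming the wait/TimeoutError helpers come from fuel-devops as elsewhere in this suite:

from devops.error import TimeoutError
from devops.helpers.helpers import wait  # assumed import paths

# Re-run the checker until it returns True, or give up after 300 seconds.
try:
    wait(lambda: checkers.check_cinder_status(node['ip']), timeout=300)
except TimeoutError:
    raise TimeoutError("Cinder services not ready.")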

View File

@@ -183,72 +183,65 @@ class ExamplePlugin(TestBasic):
# plugin+100.0.all
# plugin+100.all
# fuel_plugin_example_v3_sh
with self.env.fuel_web.get_ssh_for_node('slave-01') as remote:
checkers.check_file_exists(remote,
'/tmp/plugin+100.0.all')
checkers.check_file_exists(remote,
'/tmp/plugin+100.all')
checkers.check_file_exists(remote,
'/tmp/fuel_plugin_example_v3_sh')
checkers.check_file_exists(remote,
'/tmp/fuel_plugin_example_v3_puppet')
slave1 = self.fuel_web.get_nailgun_node_by_name('slave-01')
checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.0.all')
checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.all')
checkers.check_file_exists(slave1['ip'],
'/tmp/fuel_plugin_example_v3_sh')
checkers.check_file_exists(slave1['ip'],
'/tmp/fuel_plugin_example_v3_puppet')
# check if fuel_plugin_example_v3_puppet called
# between netconfig and connectivity_tests
netconfig_str = 'MODULAR: netconfig.pp'
plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp'
connect_str = 'MODULAR: connectivity_tests.pp'
checkers.check_log_lines_order(remote,
log_file_path='/var/log/puppet.log',
line_matcher=[netconfig_str,
plugin_str,
connect_str])
# check if fuel_plugin_example_v3_puppet called
# between netconfig and connectivity_tests
netconfig_str = 'MODULAR: netconfig.pp'
plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp'
connect_str = 'MODULAR: connectivity_tests.pp'
checkers.check_log_lines_order(
ip=slave1['ip'],
log_file_path='/var/log/puppet.log',
line_matcher=[netconfig_str,
plugin_str,
connect_str])
# check if slave-02 contain
# plugin+100.0.all
# plugin+100.all
with self.env.fuel_web.get_ssh_for_node('slave-02') as remote:
checkers.check_file_exists(remote,
'/tmp/plugin+100.0.all')
checkers.check_file_exists(remote,
'/tmp/plugin+100.all')
slave2 = self.fuel_web.get_nailgun_node_by_name('slave-02')
checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.0.all')
checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.all')
# check if slave-03 contain
# plugin+100.0.all
# plugin+100.all
# fuel_plugin_example_v3_sh
# fuel_plugin_example_v3_puppet
with self.env.fuel_web.get_ssh_for_node('slave-03') as remote:
checkers.check_file_exists(remote,
'/tmp/plugin+100.0.all')
checkers.check_file_exists(remote,
'/tmp/plugin+100.all')
checkers.check_file_exists(remote,
'/tmp/fuel_plugin_example_v3_sh')
checkers.check_file_exists(remote,
'/tmp/fuel_plugin_example_v3_puppet')
slave3 = self.fuel_web.get_nailgun_node_by_name('slave-03')
checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.0.all')
checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.all')
checkers.check_file_exists(slave3['ip'],
'/tmp/fuel_plugin_example_v3_sh')
checkers.check_file_exists(slave3['ip'],
'/tmp/fuel_plugin_example_v3_puppet')
# check if service run on slave-03
logger.debug("Checking service on node {0}".format('slave-03'))
# check if service run on slave-03
logger.debug("Checking service on node {0}".format('slave-03'))
cmd = 'pgrep -f fuel-simple-service'
res_pgrep = remote.execute(cmd)
assert_equal(0, res_pgrep['exit_code'],
'Command {0} failed with error {1}'
.format(cmd, res_pgrep['stderr']))
process_count = len(res_pgrep['stdout'])
assert_equal(1, process_count,
"There should be 1 process 'fuel-simple-service',"
" but {0} found {1} processes".format(cmd,
process_count))
# curl to service
cmd_curl = 'curl localhost:8234'
res_curl = remote.execute(cmd_curl)
assert_equal(0, res_pgrep['exit_code'],
'Command {0} failed with error {1}'
.format(cmd_curl, res_curl['stderr']))
cmd = 'pgrep -f fuel-simple-service'
res_pgrep = self.ssh_manager.execute_on_remote(
ip=slave3['ip'],
cmd=cmd
)
process_count = len(res_pgrep['stdout'])
assert_equal(1, process_count,
"There should be 1 process 'fuel-simple-service',"
" but {0} found {1} processes".format(cmd, process_count))
# curl to service
cmd_curl = 'curl localhost:8234'
self.ssh_manager.execute_on_remote(
ip=slave3['ip'],
cmd=cmd_curl
)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("deploy_ha_one_controller_neutron_example_v3")

View File

@@ -131,20 +131,18 @@ class TestOffloading(BondingTest):
for node in nodes:
for eth in bond0:
for name in offloadings_1:
with self.env.d_env.get_ssh_to_remote(node['ip']) as host:
result = check_offload(host, eth, name)
assert_equal(
result, 'off',
"Offload type '{0}': '{1}' - node-{2}, {3}".format(
name, result, node['id'], eth))
result = check_offload(node['ip'], eth, name)
assert_equal(
result, 'off',
"Offload type '{0}': '{1}' - node-{2}, {3}".format(
name, result, node['id'], eth))
for eth in bond1:
for name in offloadings_2:
with self.env.d_env.get_ssh_to_remote(node['ip']) as host:
result = check_offload(host, eth, name)
assert_equal(
result, 'on',
"Offload type '{0}': '{1}' - node-{2}, {3}".format(
name, result, node['id'], eth))
result = check_offload(node['ip'], eth, name)
assert_equal(
result, 'on',
"Offload type '{0}': '{1}' - node-{2}, {3}".format(
name, result, node['id'], eth))
self.show_step(10)
self.fuel_web.run_ostf(cluster_id=cluster_id)
@@ -241,20 +239,18 @@ class TestOffloading(BondingTest):
for node in nodes:
for eth in bond0:
for name in offloadings_1:
with self.env.d_env.get_ssh_to_remote(node['ip']) as host:
result = check_offload(host, eth, name)
assert_equal(
result, 'off',
"Offload type '{0}': '{1}' - node-{2}, {3}".format(
name, result, node['id'], eth))
result = check_offload(node['ip'], eth, name)
assert_equal(
result, 'off',
"Offload type '{0}': '{1}' - node-{2}, {3}".format(
name, result, node['id'], eth))
for eth in bond1:
for name in offloadings_2:
with self.env.d_env.get_ssh_to_remote(node['ip']) as host:
result = check_offload(host, eth, name)
assert_equal(
result, 'on',
"Offload type '{0}': '{1}' - node-{2}, {3}".format(
name, result, node['id'], eth))
result = check_offload(node['ip'], eth, name)
assert_equal(
result, 'on',
"Offload type '{0}': '{1}' - node-{2}, {3}".format(
name, result, node['id'], eth))
self.show_step(10)
self.fuel_web.run_ostf(cluster_id=cluster_id)
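For context on what check_offload() returns: ethtool --show-offload prints one 'feature: on|off' line per offload type, and the awk filter keeps only the second column. A rough usage example with a hypothetical interface name:

# $ ethtool --show-offload eth0 | awk '/generic-receive-offload/ {print $2}'
# on
result = check_offload(node['ip'], 'eth0', 'generic-receive-offload')
assert_equal(result, 'on', 'GRO is expected to be enabled on eth0')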

View File

@@ -505,11 +505,9 @@ class CephRadosGW(TestBasic):
cluster_id, ['controller'])
for node in controller_nodes:
remote = self.env.d_env.get_ssh_to_remote(node['ip'])
logger.info("Check all HAProxy backends on {}".format(
node['meta']['system']['fqdn']))
haproxy_status = checkers.check_haproxy_backend(remote)
remote.clear()
haproxy_status = checkers.check_haproxy_backend(node['ip'])
assert_equal(haproxy_status['exit_code'], 1,
"HAProxy backends are DOWN. {0}".format(
haproxy_status))
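The exit-code assertion hinges on grep semantics: grep exits 0 when it finds a match and 1 when it finds none, and check_haproxy_backend() greps the HAProxy status output for DOWN backends (per its docstring). A healthy cluster is therefore the no-match case:

# exit_code 1 => grep matched nothing => no backend reported DOWN.
haproxy_status = checkers.check_haproxy_backend(node['ip'])
assert_equal(haproxy_status['exit_code'], 1,
             'Some HAProxy backends are DOWN: {0}'.format(haproxy_status))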

View File

@@ -225,12 +225,12 @@ class CommandLineTest(test_cli_base.CommandLine):
"""
self.env.revert_snapshot("cli_selected_nodes_deploy")
with self.env.d_env.get_admin_remote() as remote:
node_id = self.fuel_web.get_nailgun_node_by_devops_node(
self.env.d_env.nodes().slaves[2])['id']
node_id = self.fuel_web.get_nailgun_node_by_devops_node(
self.env.d_env.nodes().slaves[2])['id']
assert_true(check_cobbler_node_exists(remote, node_id),
"node-{0} is not found".format(node_id))
assert_true(check_cobbler_node_exists(self.ssh_manager.admin_ip,
node_id),
"node-{0} is not found".format(node_id))
self.env.d_env.nodes().slaves[2].destroy()
try:
wait(
@@ -258,8 +258,8 @@ class CommandLineTest(test_cli_base.CommandLine):
"After deletion node-{0} is found in fuel list".
format(node_id))
with self.env.d_env.get_admin_remote() as remote:
is_cobbler_node_exists = check_cobbler_node_exists(remote, node_id)
is_cobbler_node_exists = check_cobbler_node_exists(
self.ssh_manager.admin_ip, node_id)
assert_false(is_cobbler_node_exists,
"After deletion node-{0} is found in cobbler list".

View File

@@ -169,8 +169,8 @@ class TestMultipleClusterNets(TestBasic):
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])
self.show_step(6)
with self.env.d_env.get_admin_remote() as remote:
check_get_network_data_over_cli(remote, cluster_id, '/var/log/')
check_get_network_data_over_cli(self.ssh_manager.admin_ip,
cluster_id, '/var/log/')
management_ranges_default = []
management_ranges_custom = []
@@ -219,12 +219,14 @@ class TestMultipleClusterNets(TestBasic):
utils.put_json_on_remote_from_dict(
remote, updated_network, cluster_id)
check_update_network_data_over_cli(remote, cluster_id,
check_update_network_data_over_cli(self.ssh_manager.admin_ip,
cluster_id,
'/var/log/')
self.show_step(9)
with self.env.d_env.get_admin_remote() as remote:
check_get_network_data_over_cli(remote, cluster_id, '/var/log/')
check_get_network_data_over_cli(self.ssh_manager.admin_ip,
cluster_id, '/var/log/')
latest_net = json.loads(remote.open(
'/var/log/network_1.json').read())
updated_storage_default = self.get_ranges(latest_net, 'storage',

View File

@@ -90,8 +90,7 @@ class NeutronTun(TestBasic):
self.fuel_web.check_fixed_network_cidr(
cluster_id, os_conn)
with self.env.d_env.get_admin_remote() as remote:
checkers.check_client_smoke(remote)
checkers.check_client_smoke(self.ssh_manager.admin_ip)
self.fuel_web.verify_network(cluster_id)

View File

@@ -133,17 +133,16 @@ class TestOffloading(TestBasic):
self.show_step(8)
for node in nodes:
with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
for name in offloadings_1:
result = check_offload(remote, iface1, name)
assert_equal(result, "off",
"Offload type {0} is {1} on {2}".format(
name, result, node['name']))
for name in offloadings_2:
result = check_offload(remote, iface2, name)
assert_equal(result, "on",
"Offload type {0} is {1} on {2}".format(
name, result, node['name']))
for name in offloadings_1:
result = check_offload(node['ip'], iface1, name)
assert_equal(result, "off",
"Offload type {0} is {1} on {2}".format(
name, result, node['name']))
for name in offloadings_2:
result = check_offload(node['ip'], iface2, name)
assert_equal(result, "on",
"Offload type {0} is {1} on {2}".format(
name, result, node['name']))
self.show_step(9)
self.fuel_web.run_ostf(cluster_id=cluster_id)
@@ -234,17 +233,16 @@ class TestOffloading(TestBasic):
self.show_step(8)
for node in nodes:
with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
for name in offloadings_1:
result = check_offload(remote, iface1, name)
assert_equal(result, "off",
"Offload type {0} is {1} on {2}".format(
name, result, node['name']))
for name in offloadings_2:
result = check_offload(remote, iface2, name)
assert_equal(result, "on",
"Offload type {0} is {1} on {2}".format(
name, result, node['name']))
for name in offloadings_1:
result = check_offload(node['ip'], iface1, name)
assert_equal(result, "off",
"Offload type {0} is {1} on {2}".format(
name, result, node['name']))
for name in offloadings_2:
result = check_offload(node['ip'], iface2, name)
assert_equal(result, "on",
"Offload type {0} is {1} on {2}".format(
name, result, node['name']))
self.show_step(9)
self.fuel_web.run_ostf(cluster_id=cluster_id)

View File

@@ -84,17 +84,10 @@ class SaharaHAOneController(TestBasic):
logger.debug('Verify Sahara service on controller')
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
# count = 1 + api_workers (from sahara.conf)
checkers.verify_service(
remote,
service_name='sahara-api',
count=2)
# count = 2 * 1 (hardcoded by deployment team)
checkers.verify_service(
remote,
service_name='sahara-engine',
count=2)
# count = 1 + api_workers (from sahara.conf)
checkers.verify_service(_ip, service_name='sahara-api', count=2)
# count = 2 * 1 (hardcoded by deployment team)
checkers.verify_service(_ip, service_name='sahara-engine', count=2)
logger.debug('Check MD5 sum of Vanilla2 image')
check_image = checkers.check_image(
@@ -198,17 +191,11 @@ class SaharaHA(TestBasic):
logger.debug('Verify Sahara service on all controllers')
for slave in ["slave-01", "slave-02", "slave-03"]:
_ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
# count = 1 + api_workers (from sahara.conf)
checkers.verify_service(
remote,
service_name='sahara-api',
count=2)
# count = 2 * 1 (hardcoded by deployment team)
checkers.verify_service(
remote,
service_name='sahara-engine',
count=2)
# count = 1 + api_workers (from sahara.conf)
checkers.verify_service(_ip, service_name='sahara-api', count=2)
# count = 2 * 1 (hardcoded by deployment team)
checkers.verify_service(_ip,
service_name='sahara-engine', count=2)
logger.debug('Check MD5 sum of Vanilla2 image')
check_image = checkers.check_image(
@@ -306,10 +293,7 @@ class MuranoHAOneController(TestBasic):
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='murano-api')
checkers.verify_service(_ip, service_name='murano-api')
logger.debug('Run sanity and functional Murano OSTF tests')
self.fuel_web.run_single_ostf_test(
@@ -396,10 +380,7 @@ class MuranoHA(TestBasic):
self.fuel_web.assert_cluster_ready(os_conn, smiles_count=13)
for slave in ["slave-01", "slave-02", "slave-03"]:
_ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='murano-api')
checkers.verify_service(_ip, service_name='murano-api')
logger.debug('Run sanity and functional Murano OSTF tests')
self.fuel_web.run_single_ostf_test(
@@ -561,10 +542,9 @@ class CeilometerHAOneControllerMongo(OSTFCeilometerHelper):
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(remote,
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
checkers.verify_service(_ip,
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-03")['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
@@ -618,10 +598,9 @@ class CeilometerHAOneControllerMongo(OSTFCeilometerHelper):
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(remote,
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
checkers.verify_service(_ip,
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
self.run_tests(cluster_id)
self.env.make_snapshot("deploy_ceilometer_ha_one_controller_multirole")
@@ -676,10 +655,9 @@ class CeilometerHAMongo(OSTFCeilometerHelper):
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(remote,
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
checkers.verify_service(_ip,
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
self.run_tests(cluster_id,
skip_tests=['test_check_volume_events'])
@@ -728,10 +706,9 @@ class CeilometerHAMongo(OSTFCeilometerHelper):
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(remote,
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
checkers.verify_service(_ip,
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
self.run_tests(cluster_id)
self.env.make_snapshot("deploy_ceilometer_ha_multirole", is_make=True)
@@ -823,10 +800,9 @@ class CeilometerHAMongo(OSTFCeilometerHelper):
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(remote,
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
checkers.verify_service(_ip,
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
self.run_tests(cluster_id)
self.env.make_snapshot("deploy_ceilometer_ha_with_external_mongo")
@@ -887,16 +863,12 @@ class HeatHAOneController(TestBasic):
self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='heat-api', count=3)
checkers.verify_service(_ip, service_name='heat-api', count=3)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(remote,
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
checkers.verify_service(_ip,
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
logger.debug('Run Heat OSTF platform tests')
@@ -982,13 +954,10 @@ class HeatHA(TestBasic):
for slave in ["slave-01", "slave-02", "slave-03"]:
_ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='heat-api', count=3)
checkers.verify_service(remote,
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
checkers.verify_service(_ip, service_name='heat-api', count=3)
checkers.verify_service(_ip,
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
logger.debug('Run Heat OSTF platform tests')

View File

@@ -221,6 +221,7 @@ class UbuntuBootstrapBuild(base_test_case.TestBasic):
self.env.bootstrap_nodes(nodes)
for node in nodes:
n_node = self.fuel_web.get_nailgun_node_by_devops_node(node)
with self.fuel_web.get_ssh_for_node(node.name) as slave_remote:
checkers.verify_bootstrap_on_node(slave_remote,
os_type="ubuntu",
@@ -234,7 +235,7 @@
"{1}".format(package, node.name))
for injected_dir in ["/var/lib/testdir", "/var/www/testdir2"]:
checkers.check_file_exists(slave_remote, injected_dir)
checkers.check_file_exists(n_node['ip'], injected_dir)
file_content = \
slave_remote.execute("cat /test_bootstrap_script")

View File

@@ -242,14 +242,14 @@ class SeparateDbFailover(TestBasic):
test_sets=['sanity', 'smoke', 'ha'])
checkers.check_hiera_hosts(
self, self.fuel_web.client.list_cluster_nodes(cluster_id),
self.fuel_web.client.list_cluster_nodes(cluster_id),
cmd='hiera memcache_roles')
database_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
cluster_id, ['standalone-database'])
logger.debug("database nodes are {0}".format(database_nodes))
checkers.check_hiera_hosts(
self, database_nodes,
database_nodes,
cmd='hiera corosync_roles')
nailgun_node = self.fuel_web.update_nodes(cluster_id, node,
@@ -263,12 +263,12 @@
test_sets=['sanity', 'smoke', 'ha'])
checkers.check_hiera_hosts(
self, self.fuel_web.client.list_cluster_nodes(cluster_id),
self.fuel_web.client.list_cluster_nodes(cluster_id),
cmd='hiera memcache_roles')
database_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
cluster_id, ['standalone-database'])
logger.debug("database nodes are {0}".format(database_nodes))
checkers.check_hiera_hosts(
self, database_nodes,
database_nodes,
cmd='hiera corosync_roles')

View File

@@ -255,7 +255,7 @@ class SeparateKeystoneFailover(TestBasic):
cluster_id, ['standalone-keystone'])
logger.debug("keystone nodes are {0}".format(keystone_nodes))
checkers.check_hiera_hosts(
self, keystone_nodes,
keystone_nodes,
cmd='hiera memcache_roles')
other_nodes = []
@@ -265,11 +265,11 @@
other_nodes.append(nodes_list)
logger.debug("other nodes are {0}".format(other_nodes))
checkers.check_hiera_hosts(
self, other_nodes,
other_nodes,
cmd='hiera memcache_roles')
checkers.check_hiera_hosts(
self, keystone_nodes,
keystone_nodes,
cmd='hiera corosync_roles')
nailgun_node = self.fuel_web.update_nodes(cluster_id, node,
@@ -286,7 +286,7 @@
cluster_id, ['standalone-keystone'])
logger.debug("keystone nodes are {0}".format(keystone_nodes))
checkers.check_hiera_hosts(
self, keystone_nodes,
keystone_nodes,
cmd='hiera memcache_roles')
other_nodes = []
@@ -296,9 +296,9 @@
other_nodes.append(nodes_list)
logger.debug("other nodes are {0}".format(other_nodes))
checkers.check_hiera_hosts(
self, other_nodes,
other_nodes,
cmd='hiera memcache_roles')
checkers.check_hiera_hosts(
self, keystone_nodes,
keystone_nodes,
cmd='hiera corosync_roles')

View File

@@ -241,18 +241,18 @@ class SeparateRabbitFailover(TestBasic):
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['sanity', 'smoke', 'ha'])
checkers.check_hiera_hosts(
self, self.fuel_web.client.list_cluster_nodes(cluster_id),
self.fuel_web.client.list_cluster_nodes(cluster_id),
cmd='hiera amqp_hosts')
checkers.check_hiera_hosts(
self, self.fuel_web.client.list_cluster_nodes(cluster_id),
self.fuel_web.client.list_cluster_nodes(cluster_id),
cmd='hiera memcache_roles')
rabbit_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
cluster_id, ['standalone-rabbitmq'])
logger.debug("rabbit nodes are {0}".format(rabbit_nodes))
checkers.check_hiera_hosts(
self, rabbit_nodes,
rabbit_nodes,
cmd='hiera corosync_roles')
nailgun_node = self.fuel_web.update_nodes(cluster_id, node,
@@ -265,16 +265,16 @@
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['sanity', 'smoke', 'ha'])
checkers.check_hiera_hosts(
self, self.fuel_web.client.list_cluster_nodes(cluster_id),
self.fuel_web.client.list_cluster_nodes(cluster_id),
cmd='hiera amqp_hosts')
checkers.check_hiera_hosts(
self, self.fuel_web.client.list_cluster_nodes(cluster_id),
self.fuel_web.client.list_cluster_nodes(cluster_id),
cmd='hiera memcache_roles')
rabbit_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
cluster_id, ['standalone-rabbitmq'])
logger.debug("rabbit nodes are {0}".format(rabbit_nodes))
checkers.check_hiera_hosts(
self, rabbit_nodes,
rabbit_nodes,
cmd='hiera corosync_roles')

View File

@@ -115,8 +115,8 @@ class TestHaFailoverBase(TestBasic):
self.fuel_web.verify_network(cluster_id)
for node in ['slave-0{0}'.format(slave) for slave in xrange(1, 4)]:
with self.fuel_web.get_ssh_for_node(node) as remote:
check_public_ping(remote)
ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
check_public_ping(ip)
self.env.make_snapshot(self.snapshot_name, is_make=True)

View File

@@ -158,8 +158,8 @@ class TestNeutronFailoverBase(base_test_case.TestBasic):
self.fuel_web.deploy_cluster_wait(cluster_id)
for node in ['slave-0{0}'.format(slave) for slave in xrange(1, 4)]:
with self.fuel_web.get_ssh_for_node(node) as remote:
checkers.check_public_ping(remote)
ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
checkers.check_public_ping(ip)
self.env.make_snapshot('deploy_ha_neutron_{}'.format(
self.segment_type), is_make=True)

View File

@@ -164,8 +164,7 @@ def patch_and_assemble_ubuntu_bootstrap(environment):
no_progress_bar,
no_append)
ssh.execute_on_remote(ip=ssh.admin_ip, cmd=image_rebuild)
with environment.d_env.get_admin_remote() as remote:
checkers.check_file_exists(remote, '{0}'.format(bootstrap_file))
checkers.check_file_exists(ssh.admin_ip, str(bootstrap_file))
except Exception as e:
logger.error("Could not upload package {e}".format(e=e))
raise
@@ -182,10 +181,9 @@ def replace_centos_bootstrap(environment):
raise Exception("{} variable don't exist"
.format(settings.UPDATE_FUEL))
rebuilded_bootstrap = '/var/initramfs.img.updated'
with environment.d_env.get_admin_remote() as remote:
checkers.check_file_exists(
remote,
'{0}'.format(rebuilded_bootstrap))
checkers.check_file_exists(
ssh.admin_ip,
str(rebuilded_bootstrap))
logger.info("Assigning new bootstrap from {}".format(rebuilded_bootstrap))
bootstrap = "/var/www/nailgun/bootstrap"
cmd = ("mv {0}/initramfs.img /var/initramfs.img;"

View File

@@ -359,11 +359,9 @@ class BaseActions(PrepareActions, HealthCheckActions, PluginsActions):
self.cluster_id, ['controller'])
for node in controller_nodes:
remote = self.env.d_env.get_ssh_to_remote(node['ip'])
logger.info("Check all HAProxy backends on {}".format(
node['meta']['system']['fqdn']))
haproxy_status = checkers.check_haproxy_backend(remote)
remote.clear()
haproxy_status = checkers.check_haproxy_backend(node['ip'])
assert_equal(haproxy_status['exit_code'], 1,
"HAProxy backends are DOWN. {0}".format(
haproxy_status))

View File

@@ -186,47 +186,45 @@ class FillRootActions(object):
self.primary_controller_space_to_filled
))
with self.fuel_web.get_ssh_for_node(
self.primary_controller.name) as remote:
run_on_remote_get_results(
remote, 'fallocate -l {}M /root/bigfile'.format(
self.primary_controller_space_to_filled))
check_file_exists(remote, '/root/bigfile')
node = self.fuel_web.get_nailgun_node_by_name(
self.primary_controller.name)
self.ssh_manager.execute_on_remote(
ip=node['ip'],
cmd='fallocate -l {}M /root/bigfile'.format(
self.primary_controller_space_to_filled)
)
check_file_exists(node['ip'], '/root/bigfile')
@deferred_decorator([make_snapshot_if_step_fail])
@action
def fill_root_below_rabbit_disk_free_limit(self):
"""Fill root more to below rabbit disk free limit"""
with self.fuel_web.get_ssh_for_node(
self.primary_controller.name) as remote:
node = self.fuel_web.get_nailgun_node_by_name(
self.primary_controller.name)
pacemaker_attributes = run_on_remote_get_results(
remote, 'cibadmin --query --scope status')['stdout_str']
controller_space_on_root = get_pacemaker_nodes_attributes(
pacemaker_attributes)[self.primary_controller_fqdn][
'root_free']
logger.info(
"Free space in root on primary controller - {}".format(
controller_space_on_root
))
controller_space_to_filled = str(
int(
controller_space_on_root
) - self.rabbit_disk_free_limit - 1)
logger.info(
"Need to fill space on root - {}".format(
controller_space_to_filled
))
run_on_remote_get_results(
remote, 'fallocate -l {}M /root/bigfile2'.format(
controller_space_to_filled))
check_file_exists(remote, '/root/bigfile2')
pacemaker_attributes = self.ssh_manager.execute_on_remote(
ip=node['ip'],
cmd='cibadmin --query --scope status'
)['stdout_str']
controller_space_on_root = get_pacemaker_nodes_attributes(
pacemaker_attributes)[self.primary_controller_fqdn]['root_free']
logger.info("Free space in root on primary controller - {}".format(
controller_space_on_root))
controller_space_to_filled = str(
int(controller_space_on_root) - self.rabbit_disk_free_limit - 1
)
logger.info("Need to fill space on root - {}".format(
controller_space_to_filled))
self.ssh_manager.execute_on_remote(
ip=node['ip'],
cmd='fallocate -l {}M /root/bigfile2'.format(
controller_space_to_filled)
)
check_file_exists(node['ip'], '/root/bigfile2')
@deferred_decorator([make_snapshot_if_step_fail])
@action