Refactoring functions in checkers.py (part 1)

The following functions were changed:
  * check_update_network_data_over_cli
  * check_get_network_data_over_cli
  * check_offload
  * check_client_smoke
  * check_hiera_hosts
  * check_log_lines_order
  * check_file_exists
  * check_cobbler_node_exists
  * check_haproxy_backend
  * check_public_ping
  * check_repo_managment
  * check_cinder_status
  * verify_service

Related tests are also modified.
Some typos are fixed.
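
The shape of the change, as a hedged before/after sketch (names are taken
from the hunks below; the err_msg behaviour is inferred from the
assert_equal-on-exit_code blocks it replaces):

    # before: every checker received an SSHClient and managed it itself
    with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
        result = remote.execute(cmd)

    # after: checkers take a bare IP and go through the shared SSHManager;
    # execute() returns the result dict for the caller to inspect, while
    # execute_on_remote() fails the test with err_msg on a nonzero exit
    result = ssh_manager.execute(ip=node['ip'], cmd=cmd)
    ssh_manager.execute_on_remote(ip=node['ip'], cmd=cmd,
                                  err_msg='command failed')
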
Implements: blueprint sshmanager-integration

Change-Id: Ifdc2a01d100dbf3b809f6619d9877a8c98db3258
Author: Vasily Gorin, 2016-02-24 17:42:23 +03:00
parent d5c987924a
commit 0af42bd0f8
21 changed files with 303 additions and 331 deletions

[checkers.py]

@@ -44,15 +44,21 @@ from proboscis.asserts import assert_true
 from time import sleep
 
+ssh_manager = SSHManager()
+
 
 @logwrap
-def check_cinder_status(remote):
+def check_cinder_status(ip):
     """Parse output and return False if any enabled service is down.
     'cinder service-list' stdout example:
     | cinder-scheduler | node-1.test.domain.local | nova | enabled | up |
     | cinder-scheduler | node-2.test.domain.local | nova | enabled | down |
     """
     cmd = '. openrc; cinder service-list'
-    result = remote.execute(cmd)
+    result = ssh_manager.execute(
+        ip=ip,
+        cmd=cmd
+    )
     cinder_services = ''.join(result['stdout'])
     logger.debug('>$ cinder service-list\n{}'.format(cinder_services))
     if result['exit_code'] == 0:
@@ -87,9 +93,12 @@ def check_image(image, md5, path):
 
 @logwrap
-def verify_service(remote, service_name, count=1,
+def verify_service(ip, service_name, count=1,
                    ignore_count_of_proccesses=False):
-    ps_output = remote.execute('ps ax')['stdout']
+    ps_output = ssh_manager.execute_on_remote(
+        ip=ip,
+        cmd='ps ax'
+    )['stdout']
     api = filter(lambda x: service_name in x, ps_output)
     logger.debug("{} \\n: {}".format(service_name, str(api)))
     if not ignore_count_of_proccesses:
@@ -237,12 +246,10 @@ def check_archive_type(tar_path):
 
 @logwrap
-def check_file_exists(node_ssh, path):
-    result = node_ssh.execute('test -e "{0}"'.format(path))
-    assert_equal(result['exit_code'],
-                 0,
-                 'Can not find {0}'.format(path))
-    logger.info('File {0} exists on {1}'.format(path, node_ssh.host))
+def check_file_exists(ip, path):
+    assert_true(ssh_manager.exists_on_remote(ip, path),
+                'Can not find {0}'.format(path))
+    logger.info('File {0} exists on {1}'.format(path, ip))
 
 
 @logwrap
@@ -1028,46 +1035,52 @@ def is_ntpd_active(remote, ntpd_ip):
     return not remote.execute(cmd)['exit_code']
 
 
-def check_repo_managment(remote):
+def check_repo_managment(ip):
     """Check repo management
 
     run 'yum -y clean all && yum check-update' or
     'apt-get clean all && apt-get update' exit code should be 0
 
-    :type remote: SSHClient
+    :type ip: node ip
     :rtype Dict
     """
     if OPENSTACK_RELEASE == OPENSTACK_RELEASE_UBUNTU:
         cmd = "apt-get clean all && apt-get update > /dev/null"
     else:
         cmd = "yum -y clean all && yum check-update > /dev/null"
-    remote.check_call(cmd)
+    ssh_manager.execute_on_remote(
+        ip=ip,
+        cmd=cmd
+    )
 
 
-def check_public_ping(remote):
+def check_public_ping(ip):
     """ Check if ping public vip
-    :type remote: SSHClient object
+    :type ip: node ip
     """
     cmd = ('ruby /etc/puppet/modules/osnailyfacter/'
            'modular/virtual_ips/public_vip_ping_post.rb')
-    res = remote.execute(cmd)
-    assert_equal(0, res['exit_code'],
-                 'Public ping check failed:'
-                 ' {0}'.format(res))
+    ssh_manager.execute_on_remote(
+        ip=ip,
+        cmd=cmd,
+        err_msg='Public ping check failed'
+    )
 
 
-def check_cobbler_node_exists(remote, node_id):
+def check_cobbler_node_exists(ip, node_id):
     """Check node with following node_id
     is present in the cobbler node list
-    :param remote: SSHClient
+    :param ip: node ip
    :param node_id: fuel node id
     :return: bool: True if exit code of command (node) == 0
     """
     logger.debug("Check that cluster contains node with ID:{0} ".
                  format(node_id))
-    node = remote.execute(
-        'bash -c "cobbler system list" | grep '
-        '-w "node-{0}"'.format(node_id))
+    node = ssh_manager.execute(
+        ip=ip,
+        cmd='bash -c "cobbler system list" | grep '
+            '-w "node-{0}"'.format(node_id)
+    )
     return int(node['exit_code']) == 0
@@ -1079,7 +1092,7 @@ def check_cluster_presence(cluster_id, postgres_actions):
     return str(cluster_id) in query_result
 
 
-def check_haproxy_backend(remote,
+def check_haproxy_backend(ip,
                           services=None, nodes=None,
                           ignore_services=None, ignore_nodes=None):
     """Check DOWN state of HAProxy backends. Define names of service or nodes
@@ -1087,7 +1100,7 @@ def check_haproxy_backend(remote,
     service status on all nodes. Use ignore_nodes for ignore all services on
     all nodes. Ignoring has a bigger priority.
 
-    :type remote: SSHClient
+    :type ip: node ip
     :type services: List
     :type nodes: List
     :type ignore_services: List
@@ -1102,39 +1115,35 @@ def check_haproxy_backend(remote,
     grep.extend(
         ['|egrep -v "{}"'.format('|'.join(n)) for n in negative_filter if n])
 
-    return remote.execute("{}{}".format(cmd, ''.join(grep)))
+    result = ssh_manager.execute(
+        ip=ip,
+        cmd="{}{}".format(cmd, ''.join(grep))
+    )
+    return result
 
 
-def check_log_lines_order(remote, log_file_path, line_matcher):
+def check_log_lines_order(ip, log_file_path, line_matcher):
     """Read log file and check that lines order are same as strings in list
 
-    :param remote: SSHClient
+    :param ip: ip of node in str format
     :param log_file_path: path to log file
     :param line_matcher: list of strings to search
     """
-    check_file_exists(remote, path=log_file_path)
+    check_file_exists(ip, path=log_file_path)
 
     previous_line_pos = 1
     previous_line = None
     for current_line in line_matcher:
         cmd = 'tail -n +{0} {1} | grep -n "{2}"'\
             .format(previous_line_pos, log_file_path, current_line)
-        result = remote.execute(cmd)
 
-        # line not found case
-        assert_equal(0,
-                     result['exit_code'],
-                     "Line '{0}' not found after line '{1}' in the file '{2}'."
-                     " Command '{3}' executed with exit_code='{4}'\n"
-                     "stdout:\n* {5} *\n"
-                     "stderr:\n'* {6} *\n"
-                     .format(current_line,
-                             previous_line,
-                             log_file_path,
-                             cmd,
-                             result['exit_code'],
-                             '\n'.join(result['stdout']),
-                             '\n'.join(result['stderr'])))
+        result = ssh_manager.execute_on_remote(
+            ip=ip,
+            cmd=cmd,
+            err_msg="Line '{0}' not found after line '{1}' in the file "
+                    "'{2}'.".format(current_line, previous_line, log_file_path)
+        )
 
         # few lines found case
         assert_equal(1,
@@ -1157,13 +1166,16 @@ def check_log_lines_order(remote, log_file_path, line_matcher):
         previous_line = current_line
 
 
-def check_hiera_hosts(self, nodes, cmd):
+def check_hiera_hosts(nodes, cmd):
     hiera_hosts = []
     for node in nodes:
-        with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
-            hosts = ''.join(run_on_remote(remote, cmd)).strip().split(',')
-            logger.debug("hosts on {0} are {1}".format(node['hostname'],
-                                                       hosts))
+        result = ssh_manager.execute_on_remote(
+            ip=node['ip'],
+            cmd=cmd
+        )['stdout']
+        hosts = ''.join(result).strip().split(',')
+        logger.debug("hosts on {0} are {1}".format(node['hostname'], hosts))
         if not hiera_hosts:
             hiera_hosts = hosts
             continue
@@ -1173,38 +1185,49 @@ def check_hiera_hosts(self, nodes, cmd):
                          ' others'.format(node['hostname']))
 
 
-def check_client_smoke(remote):
-    fuel_output = remote.execute(
-        'fuel env list')['stdout'][2].split('|')[2].strip()
-    fuel_2_output = remote.execute(
-        'fuel2 env list')['stdout'][3].split('|')[3].strip()
+def check_client_smoke(ip):
+    fuel_output = ssh_manager.execute(
+        ip=ip,
+        cmd='fuel env list'
+    )['stdout'][2].split('|')[2].strip()
+    fuel_2_output = ssh_manager.execute(
+        ip=ip,
+        cmd='fuel2 env list'
+    )['stdout'][3].split('|')[3].strip()
     assert_equal(fuel_output, fuel_2_output,
                  "The fuel: {0} and fuel2: {1} outputs are not equal")
 
 
-def check_offload(node, interface, offload_type):
-    command = "ethtool --show-offload %s | awk '/%s/ {print $2}'"
-    offload_status = node.execute(command % (interface, offload_type))
-    assert_equal(offload_status['exit_code'], 0,
-                 "Failed to get Offload {0} "
-                 "on node {1}".format(offload_type, node))
-    return ''.join(node.execute(
-        command % (interface, offload_type))['stdout']).rstrip()
+def check_offload(ip, interface, offload_type):
+    command = "ethtool --show-offload {0} |" \
+              " awk '/{1}/ {{print $2}}'".format(interface, offload_type)
+
+    result = ssh_manager.execute_on_remote(
+        ip=ip,
+        cmd=command,
+        err_msg="Failed to get Offload {0} "
+                "on node {1}".format(offload_type, ip)
+    )
+    return ''.join(result['stdout']).rstrip()
 
 
-def check_get_network_data_over_cli(remote, cluster_id, path):
+def check_get_network_data_over_cli(ip, cluster_id, path):
     logger.info("Download network data over cli")
     cmd = 'fuel --debug --env {0} network --dir {1} --json -d'.format(
         cluster_id, path)
-    result = remote.execute(cmd)
-    assert_equal(result['exit_code'], 0,
-                 'Failed to download network data {0}'.format(result))
+    ssh_manager.execute_on_remote(
+        ip=ip,
+        cmd=cmd,
+        err_msg='Failed to download network data'
+    )
 
 
-def check_update_network_data_over_cli(remote, cluster_id, path):
+def check_update_network_data_over_cli(ip, cluster_id, path):
     logger.info("Upload network data over cli")
     cmd = 'fuel --debug --env {0} network --dir {1} --json -u'.format(
         cluster_id, path)
-    result = remote.execute(cmd)
-    assert_equal(result['exit_code'], 0,
-                 'Failed to upload network data {0}'.format(result))
+    ssh_manager.execute_on_remote(
+        ip=ip,
+        cmd=cmd,
+        err_msg='Failed to upload network data'
+    )
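
Two call styles are in play in the refactored checkers above:
ssh_manager.execute returns the result dict and leaves exit-code handling
to the caller (check_cinder_status, check_cobbler_node_exists,
check_haproxy_backend), while ssh_manager.execute_on_remote is used where
a nonzero exit should fail the test outright, with err_msg as the failure
text. A minimal sketch of choosing between them (the command is
hypothetical):

    # probe: a nonzero exit is expected data, not a failure
    result = ssh_manager.execute(ip=ip, cmd='pgrep -f some-service')
    running = result['exit_code'] == 0

    # requirement: a nonzero exit should abort the test with err_msg
    ssh_manager.execute_on_remote(
        ip=ip,
        cmd='service some-service status',
        err_msg='some-service is not healthy'
    )
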

[next file]

@@ -485,8 +485,7 @@ def check_repos_management(func):
             for n in nailgun_nodes:
                 logger.debug("Check repository management on {0}"
                              .format(n['ip']))
-                with env.d_env.get_ssh_to_remote(n['ip']) as node_ssh:
-                    check_repo_managment(node_ssh)
+                check_repo_managment(n['ip'])
         except Exception:
             logger.error("An error happened during check repositories "
                          "management on nodes. Please see the debug log.")

[next file]

@@ -222,9 +222,9 @@ class SSHManager(object):
         remote = self._get_remote(ip=ip, port=port)
         return remote.download(destination, target)
 
-    def exist_on_remote(self, ip, path, port=22):
+    def exists_on_remote(self, ip, path, port=22):
         remote = self._get_remote(ip=ip, port=port)
-        return remote.exist(path)
+        return remote.exists(path)
 
     def isdir_on_remote(self, ip, path, port=22):
         remote = self._get_remote(ip=ip, port=port)
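
The rename from exist_on_remote to exists_on_remote tracks the underlying
remote client method (exist becomes exists). A minimal usage sketch,
assuming the usual fuel-qa import path and a hypothetical node IP:

    from fuelweb_test.helpers.ssh_manager import SSHManager  # assumed path

    # the module-level ssh_manager in checkers.py suggests SSHManager is
    # safe to instantiate and share across callers
    ssh_manager = SSHManager()

    # exists_on_remote() returns a bool; check_file_exists() above wraps
    # it in assert_true so a missing file fails the test immediately
    if not ssh_manager.exists_on_remote('10.109.0.2', '/root/bigfile'):
        raise RuntimeError('expected /root/bigfile on the node')
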

[next file]

@@ -1925,9 +1925,9 @@ class FuelWebClient(object):
     def wait_cinder_is_up(self, node_names):
         logger.info("Waiting for all Cinder services up.")
         for node_name in node_names:
-            with self.get_ssh_for_node(node_name) as remote:
-                try:
-                    wait(lambda: checkers.check_cinder_status(remote),
-                         timeout=300)
-                    logger.info("All Cinder services up.")
-                except TimeoutError:
+            node = self.get_nailgun_node_by_name(node_name)
+            try:
+                wait(lambda: checkers.check_cinder_status(node['ip']),
+                     timeout=300)
+                logger.info("All Cinder services up.")
+            except TimeoutError:

[next file]

@@ -183,14 +183,12 @@ class ExamplePlugin(TestBasic):
         #     plugin+100.0.all
         #     plugin+100.all
         #     fuel_plugin_example_v3_sh]
-        with self.env.fuel_web.get_ssh_for_node('slave-01') as remote:
-            checkers.check_file_exists(remote,
-                                       '/tmp/plugin+100.0.all')
-            checkers.check_file_exists(remote,
-                                       '/tmp/plugin+100.all')
-            checkers.check_file_exists(remote,
-                                       '/tmp/fuel_plugin_example_v3_sh')
-            checkers.check_file_exists(remote,
-                                       '/tmp/fuel_plugin_example_v3_puppet')
+        slave1 = self.fuel_web.get_nailgun_node_by_name('slave-01')
+        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.0.all')
+        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.all')
+        checkers.check_file_exists(slave1['ip'],
+                                   '/tmp/fuel_plugin_example_v3_sh')
+        checkers.check_file_exists(slave1['ip'],
+                                   '/tmp/fuel_plugin_example_v3_puppet')
 
         # check if fuel_plugin_example_v3_puppet called
@@ -198,7 +196,8 @@ class ExamplePlugin(TestBasic):
         netconfig_str = 'MODULAR: netconfig.pp'
         plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp'
         connect_str = 'MODULAR: connectivity_tests.pp'
-        checkers.check_log_lines_order(remote,
+        checkers.check_log_lines_order(
+            ip=slave1['ip'],
             log_file_path='/var/log/puppet.log',
             line_matcher=[netconfig_str,
                           plugin_str,
@@ -207,48 +206,42 @@ class ExamplePlugin(TestBasic):
         # check if slave-02 contain
         #     plugin+100.0.all
         #     plugin+100.al
-        with self.env.fuel_web.get_ssh_for_node('slave-02') as remote:
-            checkers.check_file_exists(remote,
-                                       '/tmp/plugin+100.0.all')
-            checkers.check_file_exists(remote,
-                                       '/tmp/plugin+100.all')
+        slave2 = self.fuel_web.get_nailgun_node_by_name('slave-02')
+        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.0.all')
+        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.all')
 
         # check if slave-03 contain
         #     plugin+100.0.all
         #     plugin+100.all
         #     fuel_plugin_example_v3_sh
         #     fuel_plugin_example_v3_puppet
-        with self.env.fuel_web.get_ssh_for_node('slave-03') as remote:
-            checkers.check_file_exists(remote,
-                                       '/tmp/plugin+100.0.all')
-            checkers.check_file_exists(remote,
-                                       '/tmp/plugin+100.all')
-            checkers.check_file_exists(remote,
-                                       '/tmp/fuel_plugin_example_v3_sh')
-            checkers.check_file_exists(remote,
-                                       '/tmp/fuel_plugin_example_v3_puppet')
+        slave3 = self.fuel_web.get_nailgun_node_by_name('slave-03')
+        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.0.all')
+        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.all')
+        checkers.check_file_exists(slave3['ip'],
+                                   '/tmp/fuel_plugin_example_v3_sh')
+        checkers.check_file_exists(slave3['ip'],
+                                   '/tmp/fuel_plugin_example_v3_puppet')
 
         # check if service run on slave-03
         logger.debug("Checking service on node {0}".format('slave-03'))
 
         cmd = 'pgrep -f fuel-simple-service'
-        res_pgrep = remote.execute(cmd)
-        assert_equal(0, res_pgrep['exit_code'],
-                     'Command {0} failed with error {1}'
-                     .format(cmd, res_pgrep['stderr']))
+        res_pgrep = self.ssh_manager.execute_on_remote(
+            ip=slave3['ip'],
+            cmd=cmd
+        )
         process_count = len(res_pgrep['stdout'])
         assert_equal(1, process_count,
                      "There should be 1 process 'fuel-simple-service',"
-                     " but {0} found {1} processes".format(cmd,
-                                                           process_count))
+                     " but {0} found {1} processes".format(cmd, process_count))
 
         # curl to service
         cmd_curl = 'curl localhost:8234'
-        res_curl = remote.execute(cmd_curl)
-        assert_equal(0, res_pgrep['exit_code'],
-                     'Command {0} failed with error {1}'
-                     .format(cmd_curl, res_curl['stderr']))
+        self.ssh_manager.execute_on_remote(
+            ip=slave3['ip'],
+            cmd=cmd_curl
+        )
 
         self.fuel_web.run_ostf(cluster_id=cluster_id)
         self.env.make_snapshot("deploy_ha_one_controller_neutron_example_v3")

[next file]

@@ -131,16 +131,14 @@ class TestOffloading(BondingTest):
         for node in nodes:
             for eth in bond0:
                 for name in offloadings_1:
-                    with self.env.d_env.get_ssh_to_remote(node['ip']) as host:
-                        result = check_offload(host, eth, name)
+                    result = check_offload(node['ip'], eth, name)
                     assert_equal(
                         result, 'off',
                         "Offload type '{0}': '{1}' - node-{2}, {3}".format(
                             name, result, node['id'], eth))
 
             for eth in bond1:
                 for name in offloadings_2:
-                    with self.env.d_env.get_ssh_to_remote(node['ip']) as host:
-                        result = check_offload(host, eth, name)
+                    result = check_offload(node['ip'], eth, name)
                     assert_equal(
                         result, 'on',
                         "Offload type '{0}': '{1}' - node-{2}, {3}".format(
@@ -241,16 +239,14 @@ class TestOffloading(BondingTest):
         for node in nodes:
             for eth in bond0:
                 for name in offloadings_1:
-                    with self.env.d_env.get_ssh_to_remote(node['ip']) as host:
-                        result = check_offload(host, eth, name)
+                    result = check_offload(node['ip'], eth, name)
                     assert_equal(
                         result, 'off',
                         "Offload type '{0}': '{1}' - node-{2}, {3}".format(
                             name, result, node['id'], eth))
 
             for eth in bond1:
                 for name in offloadings_2:
-                    with self.env.d_env.get_ssh_to_remote(node['ip']) as host:
-                        result = check_offload(host, eth, name)
+                    result = check_offload(node['ip'], eth, name)
                     assert_equal(
                         result, 'on',
                         "Offload type '{0}': '{1}' - node-{2}, {3}".format(

[next file]

@@ -505,11 +505,9 @@ class CephRadosGW(TestBasic):
             cluster_id, ['controller'])
 
         for node in controller_nodes:
-            remote = self.env.d_env.get_ssh_to_remote(node['ip'])
             logger.info("Check all HAProxy backends on {}".format(
                 node['meta']['system']['fqdn']))
-            haproxy_status = checkers.check_haproxy_backend(remote)
-            remote.clear()
+            haproxy_status = checkers.check_haproxy_backend(node['ip'])
             assert_equal(haproxy_status['exit_code'], 1,
                          "HAProxy backends are DOWN. {0}".format(
                              haproxy_status))

[next file]

@@ -225,11 +225,11 @@ class CommandLineTest(test_cli_base.CommandLine):
         """
         self.env.revert_snapshot("cli_selected_nodes_deploy")
 
-        with self.env.d_env.get_admin_remote() as remote:
-            node_id = self.fuel_web.get_nailgun_node_by_devops_node(
-                self.env.d_env.nodes().slaves[2])['id']
-            assert_true(check_cobbler_node_exists(remote, node_id),
-                        "node-{0} is not found".format(node_id))
+        node_id = self.fuel_web.get_nailgun_node_by_devops_node(
+            self.env.d_env.nodes().slaves[2])['id']
+        assert_true(check_cobbler_node_exists(self.ssh_manager.admin_ip,
+                                              node_id),
+                    "node-{0} is not found".format(node_id))
         self.env.d_env.nodes().slaves[2].destroy()
         try:
@@ -258,8 +258,8 @@ class CommandLineTest(test_cli_base.CommandLine):
                          "After deletion node-{0} is found in fuel list".
                          format(node_id))
 
-        with self.env.d_env.get_admin_remote() as remote:
-            is_cobbler_node_exists = check_cobbler_node_exists(remote, node_id)
+        is_cobbler_node_exists = check_cobbler_node_exists(
+            self.ssh_manager.admin_ip, node_id)
 
         assert_false(is_cobbler_node_exists,
                      "After deletion node-{0} is found in cobbler list".

[next file]

@@ -169,8 +169,8 @@ class TestMultipleClusterNets(TestBasic):
         self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])
 
         self.show_step(6)
-        with self.env.d_env.get_admin_remote() as remote:
-            check_get_network_data_over_cli(remote, cluster_id, '/var/log/')
+        check_get_network_data_over_cli(self.ssh_manager.admin_ip,
+                                        cluster_id, '/var/log/')
 
         management_ranges_default = []
         management_ranges_custom = []
@@ -219,12 +219,14 @@ class TestMultipleClusterNets(TestBasic):
             utils.put_json_on_remote_from_dict(
                 remote, updated_network, cluster_id)
-            check_update_network_data_over_cli(remote, cluster_id,
-                                               '/var/log/')
+            check_update_network_data_over_cli(self.ssh_manager.admin_ip,
+                                               cluster_id,
+                                               '/var/log/')
 
         self.show_step(9)
         with self.env.d_env.get_admin_remote() as remote:
-            check_get_network_data_over_cli(remote, cluster_id, '/var/log/')
+            check_get_network_data_over_cli(self.ssh_manager.admin_ip,
+                                            cluster_id, '/var/log/')
             latest_net = json.loads(remote.open(
                 '/var/log/network_1.json').read())
             updated_storage_default = self.get_ranges(latest_net, 'storage',

[next file]

@@ -90,8 +90,7 @@ class NeutronTun(TestBasic):
         self.fuel_web.check_fixed_network_cidr(
             cluster_id, os_conn)
 
-        with self.env.d_env.get_admin_remote() as remote:
-            checkers.check_client_smoke(remote)
+        checkers.check_client_smoke(self.ssh_manager.admin_ip)
 
         self.fuel_web.verify_network(cluster_id)

[next file]

@@ -133,14 +133,13 @@ class TestOffloading(TestBasic):
         self.show_step(8)
         for node in nodes:
-            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
-                for name in offloadings_1:
-                    result = check_offload(remote, iface1, name)
-                    assert_equal(result, "off",
-                                 "Offload type {0} is {1} on {2}".format(
-                                     name, result, node['name']))
-                for name in offloadings_2:
-                    result = check_offload(remote, iface2, name)
-                    assert_equal(result, "on",
-                                 "Offload type {0} is {1} on {2}".format(
-                                     name, result, node['name']))
+            for name in offloadings_1:
+                result = check_offload(node['ip'], iface1, name)
+                assert_equal(result, "off",
+                             "Offload type {0} is {1} on {2}".format(
+                                 name, result, node['name']))
+            for name in offloadings_2:
+                result = check_offload(node['ip'], iface2, name)
+                assert_equal(result, "on",
+                             "Offload type {0} is {1} on {2}".format(
+                                 name, result, node['name']))
@@ -234,14 +233,13 @@ class TestOffloading(TestBasic):
         self.show_step(8)
         for node in nodes:
-            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
-                for name in offloadings_1:
-                    result = check_offload(remote, iface1, name)
-                    assert_equal(result, "off",
-                                 "Offload type {0} is {1} on {2}".format(
-                                     name, result, node['name']))
-                for name in offloadings_2:
-                    result = check_offload(remote, iface2, name)
-                    assert_equal(result, "on",
-                                 "Offload type {0} is {1} on {2}".format(
-                                     name, result, node['name']))
+            for name in offloadings_1:
+                result = check_offload(node['ip'], iface1, name)
+                assert_equal(result, "off",
+                             "Offload type {0} is {1} on {2}".format(
+                                 name, result, node['name']))
+            for name in offloadings_2:
+                result = check_offload(node['ip'], iface2, name)
+                assert_equal(result, "on",
+                             "Offload type {0} is {1} on {2}".format(
+                                 name, result, node['name']))

[next file]

@@ -84,17 +84,10 @@ class SaharaHAOneController(TestBasic):
         logger.debug('Verify Sahara service on controller')
         _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
-        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-            # count = 1 + api_workers (from sahara.conf)
-            checkers.verify_service(
-                remote,
-                service_name='sahara-api',
-                count=2)
-            # count = 2 * 1 (hardcoded by deployment team)
-            checkers.verify_service(
-                remote,
-                service_name='sahara-engine',
-                count=2)
+        # count = 1 + api_workers (from sahara.conf)
+        checkers.verify_service(_ip, service_name='sahara-api', count=2)
+        # count = 2 * 1 (hardcoded by deployment team)
+        checkers.verify_service(_ip, service_name='sahara-engine', count=2)
 
         logger.debug('Check MD5 sum of Vanilla2 image')
         check_image = checkers.check_image(
@@ -198,17 +191,11 @@ class SaharaHA(TestBasic):
         logger.debug('Verify Sahara service on all controllers')
         for slave in ["slave-01", "slave-02", "slave-03"]:
             _ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
-            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-                # count = 1 + api_workers (from sahara.conf)
-                checkers.verify_service(
-                    remote,
-                    service_name='sahara-api',
-                    count=2)
-                # count = 2 * 1 (hardcoded by deployment team)
-                checkers.verify_service(
-                    remote,
-                    service_name='sahara-engine',
-                    count=2)
+            # count = 1 + api_workers (from sahara.conf)
+            checkers.verify_service(_ip, service_name='sahara-api', count=2)
+            # count = 2 * 1 (hardcoded by deployment team)
+            checkers.verify_service(_ip,
+                                    service_name='sahara-engine', count=2)
 
         logger.debug('Check MD5 sum of Vanilla2 image')
         check_image = checkers.check_image(
@@ -306,10 +293,7 @@ class MuranoHAOneController(TestBasic):
             data['user'], data['password'], data['tenant'])
         self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
         _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
-        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-            checkers.verify_service(
-                remote,
-                service_name='murano-api')
+        checkers.verify_service(_ip, service_name='murano-api')
 
         logger.debug('Run sanity and functional Murano OSTF tests')
         self.fuel_web.run_single_ostf_test(
@@ -396,10 +380,7 @@ class MuranoHA(TestBasic):
         self.fuel_web.assert_cluster_ready(os_conn, smiles_count=13)
         for slave in ["slave-01", "slave-02", "slave-03"]:
             _ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
-            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-                checkers.verify_service(
-                    remote,
-                    service_name='murano-api')
+            checkers.verify_service(_ip, service_name='murano-api')
 
         logger.debug('Run sanity and functional Murano OSTF tests')
         self.fuel_web.run_single_ostf_test(
@@ -561,8 +542,7 @@ class CeilometerHAOneControllerMongo(OSTFCeilometerHelper):
         self.fuel_web.deploy_cluster_wait(cluster_id)
 
         _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
-        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-            checkers.verify_service(remote,
-                                    service_name='ceilometer-api',
-                                    ignore_count_of_proccesses=True)
+        checkers.verify_service(_ip,
+                                service_name='ceilometer-api',
+                                ignore_count_of_proccesses=True)
@@ -618,8 +598,7 @@ class CeilometerHAOneControllerMongo(OSTFCeilometerHelper):
         self.fuel_web.deploy_cluster_wait(cluster_id)
 
         _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
-        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-            checkers.verify_service(remote,
-                                    service_name='ceilometer-api',
-                                    ignore_count_of_proccesses=True)
+        checkers.verify_service(_ip,
+                                service_name='ceilometer-api',
+                                ignore_count_of_proccesses=True)
@@ -676,8 +655,7 @@ class CeilometerHAMongo(OSTFCeilometerHelper):
         self.fuel_web.deploy_cluster_wait(cluster_id)
 
         _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
-        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-            checkers.verify_service(remote,
-                                    service_name='ceilometer-api',
-                                    ignore_count_of_proccesses=True)
+        checkers.verify_service(_ip,
+                                service_name='ceilometer-api',
+                                ignore_count_of_proccesses=True)
@@ -728,8 +706,7 @@ class CeilometerHAMongo(OSTFCeilometerHelper):
         self.fuel_web.deploy_cluster_wait(cluster_id)
 
         _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
-        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-            checkers.verify_service(remote,
-                                    service_name='ceilometer-api',
-                                    ignore_count_of_proccesses=True)
+        checkers.verify_service(_ip,
+                                service_name='ceilometer-api',
+                                ignore_count_of_proccesses=True)
@@ -823,8 +800,7 @@ class CeilometerHAMongo(OSTFCeilometerHelper):
         self.fuel_web.deploy_cluster_wait(cluster_id)
 
         _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
-        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-            checkers.verify_service(remote,
-                                    service_name='ceilometer-api',
-                                    ignore_count_of_proccesses=True)
+        checkers.verify_service(_ip,
+                                service_name='ceilometer-api',
+                                ignore_count_of_proccesses=True)
@@ -887,14 +863,10 @@ class HeatHAOneController(TestBasic):
         self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
 
         _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
-        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-            checkers.verify_service(
-                remote,
-                service_name='heat-api', count=3)
+        checkers.verify_service(_ip, service_name='heat-api', count=3)
 
         _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
-        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-            checkers.verify_service(remote,
-                                    service_name='ceilometer-api',
-                                    ignore_count_of_proccesses=True)
+        checkers.verify_service(_ip,
+                                service_name='ceilometer-api',
+                                ignore_count_of_proccesses=True)
@@ -982,11 +954,8 @@ class HeatHA(TestBasic):
         for slave in ["slave-01", "slave-02", "slave-03"]:
             _ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
-            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-                checkers.verify_service(
-                    remote,
-                    service_name='heat-api', count=3)
-                checkers.verify_service(remote,
-                                        service_name='ceilometer-api',
-                                        ignore_count_of_proccesses=True)
+            checkers.verify_service(_ip, service_name='heat-api', count=3)
+            checkers.verify_service(_ip,
+                                    service_name='ceilometer-api',
+                                    ignore_count_of_proccesses=True)

[next file]

@@ -221,6 +221,7 @@ class UbuntuBootstrapBuild(base_test_case.TestBasic):
         self.env.bootstrap_nodes(nodes)
 
         for node in nodes:
+            n_node = self.fuel_web.get_nailgun_node_by_devops_node(node)
             with self.fuel_web.get_ssh_for_node(node.name) as slave_remote:
                 checkers.verify_bootstrap_on_node(slave_remote,
                                                   os_type="ubuntu",
@@ -234,7 +235,7 @@ class UbuntuBootstrapBuild(base_test_case.TestBasic):
                                 "{1}".format(package, node.name))
 
                 for injected_dir in ["/var/lib/testdir", "/var/www/testdir2"]:
-                    checkers.check_file_exists(slave_remote, injected_dir)
+                    checkers.check_file_exists(n_node['ip'], injected_dir)
 
                 file_content = \
                     slave_remote.execute("cat /test_bootstrap_script")

[next file]

@@ -242,14 +242,14 @@ class SeparateDbFailover(TestBasic):
                                test_sets=['sanity', 'smoke', 'ha'])
 
         checkers.check_hiera_hosts(
-            self, self.fuel_web.client.list_cluster_nodes(cluster_id),
+            self.fuel_web.client.list_cluster_nodes(cluster_id),
             cmd='hiera memcache_roles')
 
         database_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
             cluster_id, ['standalone-database'])
         logger.debug("database nodes are {0}".format(database_nodes))
         checkers.check_hiera_hosts(
-            self, database_nodes,
+            database_nodes,
             cmd='hiera corosync_roles')
 
         nailgun_node = self.fuel_web.update_nodes(cluster_id, node,
@@ -263,12 +263,12 @@ class SeparateDbFailover(TestBasic):
                                test_sets=['sanity', 'smoke', 'ha'])
 
         checkers.check_hiera_hosts(
-            self, self.fuel_web.client.list_cluster_nodes(cluster_id),
+            self.fuel_web.client.list_cluster_nodes(cluster_id),
             cmd='hiera memcache_roles')
 
         database_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
             cluster_id, ['standalone-database'])
         logger.debug("database nodes are {0}".format(database_nodes))
         checkers.check_hiera_hosts(
-            self, database_nodes,
+            database_nodes,
             cmd='hiera corosync_roles')

[next file]

@@ -255,7 +255,7 @@ class SeparateKeystoneFailover(TestBasic):
             cluster_id, ['standalone-keystone'])
         logger.debug("keystone nodes are {0}".format(keystone_nodes))
         checkers.check_hiera_hosts(
-            self, keystone_nodes,
+            keystone_nodes,
             cmd='hiera memcache_roles')
 
         other_nodes = []
@@ -265,11 +265,11 @@ class SeparateKeystoneFailover(TestBasic):
                 other_nodes.append(nodes_list)
         logger.debug("other nodes are {0}".format(other_nodes))
         checkers.check_hiera_hosts(
-            self, other_nodes,
+            other_nodes,
             cmd='hiera memcache_roles')
         checkers.check_hiera_hosts(
-            self, keystone_nodes,
+            keystone_nodes,
             cmd='hiera corosync_roles')
 
         nailgun_node = self.fuel_web.update_nodes(cluster_id, node,
@@ -286,7 +286,7 @@ class SeparateKeystoneFailover(TestBasic):
             cluster_id, ['standalone-keystone'])
         logger.debug("keystone nodes are {0}".format(keystone_nodes))
         checkers.check_hiera_hosts(
-            self, keystone_nodes,
+            keystone_nodes,
             cmd='hiera memcache_roles')
 
         other_nodes = []
@@ -296,9 +296,9 @@ class SeparateKeystoneFailover(TestBasic):
                 other_nodes.append(nodes_list)
         logger.debug("other nodes are {0}".format(other_nodes))
         checkers.check_hiera_hosts(
-            self, other_nodes,
+            other_nodes,
             cmd='hiera memcache_roles')
         checkers.check_hiera_hosts(
-            self, keystone_nodes,
+            keystone_nodes,
             cmd='hiera corosync_roles')

[next file]

@@ -241,18 +241,18 @@ class SeparateRabbitFailover(TestBasic):
         self.fuel_web.run_ostf(cluster_id=cluster_id,
                                test_sets=['sanity', 'smoke', 'ha'])
 
         checkers.check_hiera_hosts(
-            self, self.fuel_web.client.list_cluster_nodes(cluster_id),
+            self.fuel_web.client.list_cluster_nodes(cluster_id),
             cmd='hiera amqp_hosts')
 
         checkers.check_hiera_hosts(
-            self, self.fuel_web.client.list_cluster_nodes(cluster_id),
+            self.fuel_web.client.list_cluster_nodes(cluster_id),
             cmd='hiera memcache_roles')
 
         rabbit_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
             cluster_id, ['standalone-rabbitmq'])
         logger.debug("rabbit nodes are {0}".format(rabbit_nodes))
         checkers.check_hiera_hosts(
-            self, rabbit_nodes,
+            rabbit_nodes,
             cmd='hiera corosync_roles')
 
         nailgun_node = self.fuel_web.update_nodes(cluster_id, node,
@@ -265,16 +265,16 @@ class SeparateRabbitFailover(TestBasic):
         self.fuel_web.run_ostf(cluster_id=cluster_id,
                                test_sets=['sanity', 'smoke', 'ha'])
 
         checkers.check_hiera_hosts(
-            self, self.fuel_web.client.list_cluster_nodes(cluster_id),
+            self.fuel_web.client.list_cluster_nodes(cluster_id),
             cmd='hiera amqp_hosts')
 
         checkers.check_hiera_hosts(
-            self, self.fuel_web.client.list_cluster_nodes(cluster_id),
+            self.fuel_web.client.list_cluster_nodes(cluster_id),
             cmd='hiera memcache_roles')
 
         rabbit_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
             cluster_id, ['standalone-rabbitmq'])
         logger.debug("rabbit nodes are {0}".format(rabbit_nodes))
         checkers.check_hiera_hosts(
-            self, rabbit_nodes,
+            rabbit_nodes,
             cmd='hiera corosync_roles')

[next file]

@@ -115,8 +115,8 @@ class TestHaFailoverBase(TestBasic):
         self.fuel_web.verify_network(cluster_id)
 
         for node in ['slave-0{0}'.format(slave) for slave in xrange(1, 4)]:
-            with self.fuel_web.get_ssh_for_node(node) as remote:
-                check_public_ping(remote)
+            ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
+            check_public_ping(ip)
 
         self.env.make_snapshot(self.snapshot_name, is_make=True)

[next file]

@@ -158,8 +158,8 @@ class TestNeutronFailoverBase(base_test_case.TestBasic):
         self.fuel_web.deploy_cluster_wait(cluster_id)
 
         for node in ['slave-0{0}'.format(slave) for slave in xrange(1, 4)]:
-            with self.fuel_web.get_ssh_for_node(node) as remote:
-                checkers.check_public_ping(remote)
+            ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
+            checkers.check_public_ping(ip)
 
         self.env.make_snapshot('deploy_ha_neutron_{}'.format(
             self.segment_type), is_make=True)

[next file]

@@ -164,8 +164,7 @@ def patch_and_assemble_ubuntu_bootstrap(environment):
                 no_progress_bar,
                 no_append)
             ssh.execute_on_remote(ip=ssh.admin_ip, cmd=image_rebuild)
-            with environment.d_env.get_admin_remote() as remote:
-                checkers.check_file_exists(remote, '{0}'.format(bootstrap_file))
+            checkers.check_file_exists(ssh.admin_ip, str(bootstrap_file))
         except Exception as e:
             logger.error("Could not upload package {e}".format(e=e))
             raise
@@ -182,10 +181,9 @@ def replace_centos_bootstrap(environment):
         raise Exception("{} variable don't exist"
                         .format(settings.UPDATE_FUEL))
     rebuilded_bootstrap = '/var/initramfs.img.updated'
-    with environment.d_env.get_admin_remote() as remote:
-        checkers.check_file_exists(
-            remote,
-            '{0}'.format(rebuilded_bootstrap))
+    checkers.check_file_exists(
+        ssh.admin_ip,
+        str(rebuilded_bootstrap))
     logger.info("Assigning new bootstrap from {}".format(rebuilded_bootstrap))
     bootstrap = "/var/www/nailgun/bootstrap"
     cmd = ("mv {0}/initramfs.img /var/initramfs.img;"

[next file]

@@ -359,11 +359,9 @@ class BaseActions(PrepareActions, HealthCheckActions, PluginsActions):
             self.cluster_id, ['controller'])
 
         for node in controller_nodes:
-            remote = self.env.d_env.get_ssh_to_remote(node['ip'])
             logger.info("Check all HAProxy backends on {}".format(
                 node['meta']['system']['fqdn']))
-            haproxy_status = checkers.check_haproxy_backend(remote)
-            remote.clear()
+            haproxy_status = checkers.check_haproxy_backend(node['ip'])
             assert_equal(haproxy_status['exit_code'], 1,
                          "HAProxy backends are DOWN. {0}".format(
                              haproxy_status))

[next file]

@@ -186,47 +186,45 @@ class FillRootActions(object):
             self.primary_controller_space_to_filled
         ))
 
-        with self.fuel_web.get_ssh_for_node(
-                self.primary_controller.name) as remote:
-            run_on_remote_get_results(
-                remote, 'fallocate -l {}M /root/bigfile'.format(
-                    self.primary_controller_space_to_filled))
-            check_file_exists(remote, '/root/bigfile')
+        node = self.fuel_web.get_nailgun_node_by_name(
+            self.primary_controller.name)
+        self.ssh_manager.execute_on_remote(
+            ip=node['ip'],
+            cmd='fallocate -l {}M /root/bigfile'.format(
+                self.primary_controller_space_to_filled)
+        )
+        check_file_exists(node['ip'], '/root/bigfile')
 
     @deferred_decorator([make_snapshot_if_step_fail])
     @action
     def fill_root_below_rabbit_disk_free_limit(self):
         """Fill root more to below rabbit disk free limit"""
-        with self.fuel_web.get_ssh_for_node(
-                self.primary_controller.name) as remote:
-            pacemaker_attributes = run_on_remote_get_results(
-                remote, 'cibadmin --query --scope status')['stdout_str']
-            controller_space_on_root = get_pacemaker_nodes_attributes(
-                pacemaker_attributes)[self.primary_controller_fqdn][
-                'root_free']
+        node = self.fuel_web.get_nailgun_node_by_name(
+            self.primary_controller.name)
+        pacemaker_attributes = self.ssh_manager.execute_on_remote(
+            ip=node['ip'],
+            cmd='cibadmin --query --scope status'
+        )['stdout_str']
+        controller_space_on_root = get_pacemaker_nodes_attributes(
+            pacemaker_attributes)[self.primary_controller_fqdn]['root_free']
 
-            logger.info(
-                "Free space in root on primary controller - {}".format(
-                    controller_space_on_root
-                ))
+        logger.info("Free space in root on primary controller - {}".format(
+            controller_space_on_root))
 
-            controller_space_to_filled = str(
-                int(
-                    controller_space_on_root
-                ) - self.rabbit_disk_free_limit - 1)
+        controller_space_to_filled = str(
+            int(controller_space_on_root) - self.rabbit_disk_free_limit - 1
+        )
 
-            logger.info(
-                "Need to fill space on root - {}".format(
-                    controller_space_to_filled
-                ))
+        logger.info("Need to fill space on root - {}".format(
            controller_space_to_filled))
 
-            run_on_remote_get_results(
-                remote, 'fallocate -l {}M /root/bigfile2'.format(
-                    controller_space_to_filled))
-            check_file_exists(remote, '/root/bigfile2')
+        self.ssh_manager.execute_on_remote(
+            ip=node['ip'],
+            cmd='fallocate -l {}M /root/bigfile2'.format(
+                controller_space_to_filled)
+        )
+        check_file_exists(node['ip'], '/root/bigfile2')
 
     @deferred_decorator([make_snapshot_if_step_fail])
     @action