Use SSHManager instead of context manager

- Function update_connection was added in SSHManager.
   You can use it to update an existing connection with
   new/custom credentials

 - Function execute_on_remote was renamed to 'execute'
 - Function execute_on_remote was added to SSHManager
   with new functionality. It is the same as
   run_on_remote_and_get_result
 - Move function json_deserialize to SSHManager
 - Code refactoring according to the changes above

 SSHManager was added in the following places:
  - fuelweb_test.environment.py for all needed methods
  - patching.py for:
     - connect_admin_to_repo
     - connect_slaves_to_repo
  - decorators.py for update_rpm_packages
  - multiple_networks_hacks.py for all
Related-Bug: #1527847

Change-Id: I20dd5e02bb8833b3543780e0083576516a2893f6
This commit is contained in:
Vasily Gorin
2015-12-22 20:24:27 +03:00
committed by tatyana-leontovich
parent e29fde0f96
commit bc4bc66999
9 changed files with 318 additions and 137 deletions

View File

@@ -33,6 +33,7 @@ from proboscis.asserts import assert_equal
from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers.ssh_manager import SSHManager
from fuelweb_test.settings import MASTER_IS_CENTOS7
from fuelweb_test.helpers.regenerate_repo import CustomRepo
from fuelweb_test.helpers.utils import get_current_env
@@ -209,19 +210,26 @@ def update_rpm_packages(func):
cmd = ("echo -e '[temporary]\nname=temporary\nbaseurl=file://{0}/"
"\ngpgcheck=0\npriority=1' > {1}").format(
settings.LOCAL_MIRROR_CENTOS, conf_file)
with environment.d_env.get_admin_remote() as remote:
environment.execute_remote_cmd(remote, cmd, exit_code=0)
update_command = 'yum clean expire-cache; yum update -y -d3'
result = remote.execute(update_command)
logger.debug('Result of "yum update" command on master node: '
'{0}'.format(result))
assert_equal(int(result['exit_code']), 0,
'Packages update failed, '
'inspect logs for details')
environment.execute_remote_cmd(remote,
cmd='rm -f {0}'
.format(conf_file),
exit_code=0)
SSHManager().execute_on_remote(
ip=SSHManager().admin_ip,
cmd=cmd
)
update_command = 'yum clean expire-cache; yum update -y -d3'
SSHManager().execute(
ip=SSHManager().admin_ip,
cmd=update_command
)
logger.debug('Result of "yum update" command on master node: '
'{0}'.format(result))
assert_equal(int(result['exit_code']), 0,
'Packages update failed, '
'inspect logs for details')
SSHManager().execute_on_remote(
ip=SSHManager().admin_ip,
cmd='rm -f {0}'.format(conf_file)
)
except Exception:
logger.error("Could not update packages")
raise

View File

@@ -64,7 +64,7 @@ class BaseActions(object):
if stdin is not None:
cmd = 'echo "{0}" | {1}'.format(stdin, cmd)
result = self.ssh_manager.execute_on_remote(
result = self.ssh_manager.execute(
ip=self.admin_ip,
cmd=cmd
)
@@ -98,7 +98,7 @@ class BaseActions(object):
Standard output from console
"""
cmd = 'dockerctl copy {0} {1}'.format(copy_from, copy_to)
result = self.ssh_manager.execute_on_remote(
result = self.ssh_manager.execute(
ip=self.admin_ip,
cmd=cmd
)
@@ -113,7 +113,7 @@ class BaseActions(object):
@property
def is_container_ready(self):
result = self.ssh_manager.execute_on_remote(
result = self.ssh_manager.execute(
ip=self.admin_ip,
cmd="timeout 5 dockerctl check {0}".format(self.container)
)
@@ -286,8 +286,8 @@ class AdminActions(BaseActions):
# for admin node
cmd = 'ntpdate -p 4 -t 0.2 -ub {0}'.format(router)
if not self.ssh_manager.execute_on_remote(ip=self.admin_ip,
cmd=cmd)['exit_code']:
if not self.ssh_manager.execute(ip=self.admin_ip,
cmd=cmd)['exit_code']:
# Local ntpd on the host is alive, so
# remove all NTP sources and add the host instead.
logger.info("Switching NTPD on the Fuel admin node to use "
@@ -342,7 +342,7 @@ class AdminActions(BaseActions):
@logwrap
def clean_generated_image(self, distro):
out = self.ssh_manager.execute_on_remote(
out = self.ssh_manager.execute(
ip=self.admin_ip,
cmd="find /var/www/nailgun/targetimages/ -name "
"'env*{}*' -printf '%P\n'".format(distro.lower())
@@ -350,7 +350,7 @@ class AdminActions(BaseActions):
images = ''.join(out)
logger.debug("images are {}".format(images))
self.ssh_manager.execute_on_remote(
self.ssh_manager.execute(
ip=self.admin_ip,
cmd="find /var/www/nailgun/targetimages/ -name 'env*{}*'"
" -delete".format(distro.lower())
@@ -362,7 +362,7 @@ class AdminActions(BaseActions):
logger.info('Unpacking file')
filename, ext = os.path.splitext(name)
cmd = "tar -xpvf" if ext.endswith("tar") else "lrzuntar"
result = self.ssh_manager.execute_on_remote(
result = self.ssh_manager.execute(
ip=node_ip,
cmd='cd {0} && {2} {1}'.format(path, name, cmd)
)
@@ -407,7 +407,7 @@ class AdminActions(BaseActions):
def get_fuel_settings(self):
cmd = 'cat {cfg_file}'.format(cfg_file=hlp_data.FUEL_SETTINGS_YAML)
result = self.ssh_manager.execute_on_remote(
result = self.ssh_manager.execute(
ip=self.admin_ip,
cmd=cmd
)
@@ -424,7 +424,7 @@ class AdminActions(BaseActions):
default_style='"',
default_flow_style=False),
hlp_data.FUEL_SETTINGS_YAML)
result = self.ssh_manager.execute_on_remote(
result = self.ssh_manager.execute(
ip=self.admin_ip,
cmd=cmd
)
@@ -666,7 +666,7 @@ class DockerActions(object):
self.ssh_manager = SSHManager()
def list_containers(self):
result = self.ssh_manager.execute_on_remote(
result = self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd='dockerctl list'
)
@@ -692,7 +692,7 @@ class DockerActions(object):
.format(failed_containers, timeout))
def restart_container(self, container):
self.ssh_manager.execute_on_remote(
self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd='dockerctl restart {0}'.format(container)
)
@@ -706,7 +706,7 @@ class DockerActions(object):
def execute_in_containers(self, cmd):
for container in self.list_containers():
self.ssh_manager.execute_on_remote(
self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd="dockerctl shell {0} bash -c '{1}'".format(container, cmd)
)

View File

@@ -22,10 +22,11 @@
from proboscis.asserts import assert_equal
from fuelweb_test import logwrap
from fuelweb_test.helpers.ssh_manager import SSHManager
@logwrap
def configure_second_admin_dhcp(remote, interface):
def configure_second_admin_dhcp(ip, interface):
dhcp_conf_file = '/etc/cobbler/dnsmasq.template'
docker_start_file = '/usr/local/bin/start.sh'
cmd = ("dockerctl shell cobbler sed '/^interface/a interface={0}' -i {1};"
@@ -34,13 +35,16 @@ def configure_second_admin_dhcp(remote, interface):
"dockerctl shell cobbler cobbler sync").format(interface,
dhcp_conf_file,
docker_start_file)
result = remote.execute(cmd)
result = SSHManager().execute(
ip=ip,
cmd=cmd
)
assert_equal(result['exit_code'], 0, ('Failed to add second admin '
'network to DHCP server: {0}').format(result))
@logwrap
def configure_second_admin_firewall(remote, network, netmask, interface,
def configure_second_admin_firewall(ip, network, netmask, interface,
master_ip):
# Allow input/forwarding for nodes from the second admin network and
# enable source NAT for UDP (tftp) and HTTP (proxy server) traffic
@@ -63,15 +67,20 @@ def configure_second_admin_firewall(remote, network, netmask, interface,
for rule in rules:
cmd = 'iptables {0}'.format(rule)
result = remote.execute(cmd)
result = SSHManager().execute(
ip=ip,
cmd=cmd
)
assert_equal(result['exit_code'], 0,
('Failed to add firewall rule for second admin net '
'on master node: {0}, {1}').format(rule, result))
# Save new firewall configuration
cmd = 'service iptables save'
result = remote.execute(cmd)
result = SSHManager().execute(
ip=ip,
cmd=cmd
)
assert_equal(result['exit_code'], 0,
('Failed to save firewall configuration on master node:'
' {0}').format(result))

View File

@@ -30,7 +30,7 @@ from proboscis.asserts import assert_true
from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers.ssh_manager import SSHManager
patching_validation_schema = {
'type': {
@@ -304,9 +304,11 @@ def connect_slaves_to_repo(environment, nodes, repo_name):
]
for slave in nodes:
with environment.d_env.get_ssh_to_remote(slave['ip']) as remote:
for cmd in cmds:
environment.execute_remote_cmd(remote, cmd, exit_code=0)
for cmd in cmds:
SSHManager().execute_on_remote(
ip=slave['ip'],
cmd=cmd
)
def connect_admin_to_repo(environment, repo_name):
@@ -328,9 +330,11 @@ def connect_admin_to_repo(environment, repo_name):
"yum check-update; [[ $? -eq 100 || $? -eq 0 ]]"
]
with environment.d_env.get_admin_remote() as remote:
for cmd in cmds:
environment.execute_remote_cmd(remote, cmd, exit_code=0)
for cmd in cmds:
SSHManager().execute_on_remote(
ip=SSHManager().admin_ip,
cmd=cmd
)
def update_packages(environment, remote, packages, exclude_packages=None):

View File

@@ -246,7 +246,7 @@ class CustomRepo(object):
.format(pkgs_local_path + path_suff,
self.custom_pkgs_mirror,
pkg["filename:"])
wget_result = self.ssh_manager.execute_on_remote(
wget_result = self.ssh_manager.execute(
ip=self.ip,
cmd=wget_cmd
)
@@ -278,7 +278,7 @@ class CustomRepo(object):
regenerate_script,
local_mirror_path,
self.ubuntu_release)
script_result = self.ssh_manager.execute_on_remote(
script_result = self.ssh_manager.execute(
ip=self.ip,
cmd=script_cmd
)
@@ -316,7 +316,7 @@ class CustomRepo(object):
cmd = ('fgrep -h -e " Depends: " -e "{0}" -e "{1}" '
'/var/log/docker-logs/remote/node-*/'
'puppet*.log'.format(err_start, err_end))
result = self.ssh_manager.execute_on_remote(
result = self.ssh_manager.execute(
ip=self.ip,
cmd=cmd
)['stdout']
@@ -356,7 +356,7 @@ class CustomRepo(object):
cmd = ('fgrep -h -e "Error: Package: " -e " Requires: " /var/log/'
'docker-logs/remote/node-*/puppet*.log')
result = self.ssh_manager.execute_on_remote(
result = self.ssh_manager.execute(
ip=self.ip,
cmd=cmd
)['stdout']

View File

@@ -15,6 +15,7 @@
import os
import posixpath
import re
import json
from paramiko import RSAKey
from devops.models.node import SSHClient
@@ -104,10 +105,112 @@ class SSHManager(object):
logger.debug('SSH_MANAGER: Connections {0}'.format(self.connections))
return self._connect(self.connections[(ip, port)])
def execute_on_remote(self, ip, cmd, port=22):
def update_connection(self, ip, login=None, password=None,
                      keys=None, port=22):
    """Replace the cached SSH connection for ``ip:port``.

    Any existing connection to the endpoint is closed first, then a new
    SSHClient with the supplied credentials is registered in its place.

    :param ip: host ip string
    :param login: login string
    :param password: password string
    :param keys: list of keys
    :param port: ssh port int
    :return: None
    """
    endpoint = (ip, port)
    if endpoint in self.connections:
        # Drop the stale client before re-creating it with new credentials.
        logger.info('SSH_MANAGER:Close connection for {ip}:{port}'.format(
            ip=ip, port=port))
        self.connections[endpoint].clear()
    logger.info('SSH_MANAGER:Create new connection for '
                '{ip}:{port}'.format(ip=ip, port=port))
    self.connections[endpoint] = SSHClient(
        host=ip,
        port=port,
        username=login,
        password=password,
        private_keys=[] if keys is None else keys
    )
def execute(self, ip, cmd, port=22):
    """Run ``cmd`` on the host via the cached connection.

    :param ip: ip of host
    :param cmd: command to execute on remote host
    :param port: ssh port
    :return: result dict produced by the underlying SSH client
    """
    return self._get_remote(ip=ip, port=port).execute(cmd)
def check_call(self, ip, cmd, port=22, verbose=False):
    """Run ``cmd`` on the host, delegating to the client's check_call.

    :param ip: ip of host
    :param cmd: command to execute on remote host
    :param port: ssh port
    :param verbose: Boolean, passed through to the SSH client
    :return: whatever the underlying check_call returns
    """
    return self._get_remote(ip=ip, port=port).check_call(cmd, verbose)
def execute_on_remote(self, ip, cmd, port=22, err_msg=None,
                      jsonify=False, assert_ec_equal=None,
                      raise_on_assert=True):
    """Execute ``cmd`` on ``remote`` and return result.

    :param ip: ip of host
    :param port: ssh port
    :param cmd: command to execute on remote host
    :param err_msg: custom error message
    :param jsonify: Boolean, deserialize stdout into 'stdout_json'
    :param assert_ec_equal: list of expected exit_code
    :param raise_on_assert: Boolean
    :return: dict
    :raise: Exception
    """
    if assert_ec_equal is None:
        assert_ec_equal = [0]
    result = self.execute(ip=ip, port=port, cmd=cmd)

    if result['exit_code'] not in assert_ec_equal:
        # Collect everything useful for debugging into one structure;
        # it ends up in both the log record and the raised exception.
        error_details = {
            'command': cmd,
            'host': ip,
            'stdout': result['stdout'],
            'stderr': result['stderr'],
            'exit_code': result['exit_code']}
        default_msg = ("Unexpected exit_code returned:"
                       " actual {0}, expected {1}."
                       .format(error_details['exit_code'],
                               ' '.join(map(str, assert_ec_equal))))
        log_msg = ("{0} Command: '{1}' "
                   "Details: {2}".format(err_msg or default_msg, cmd,
                                         error_details))
        logger.error(log_msg)
        if raise_on_assert:
            raise Exception(log_msg)

    # Convenience fields: joined text and line counts for both streams.
    for stream in ('stdout', 'stderr'):
        result[stream + '_str'] = ''.join(result[stream])
        result[stream + '_len'] = len(result[stream])

    if jsonify:
        try:
            result['stdout_json'] = \
                self._json_deserialize(result['stdout_str'])
        except Exception:
            error_msg = (
                "Unable to deserialize output of command"
                " '{0}' on host {1}".format(cmd, ip))
            logger.error(error_msg)
            raise Exception(error_msg)
    return result
def _json_deserialize(self, json_string):
    """ Deserialize json_string and return object

    :param json_string: string or list with json
    :return: obj
    :raise: Exception
    """
    # A list of output lines is joined into a single document first.
    text = ''.join(json_string) if isinstance(json_string, list) \
        else json_string
    try:
        return json.loads(text)
    except Exception:
        log_msg = "Unable to deserialize"
        logger.error("{0}. Actual string:\n{1}".format(log_msg, text))
        raise Exception(log_msg)
def open_on_remote(self, ip, path, mode='r', port=22):
    """Open ``path`` on the remote host and return a file-like object.

    :param ip: ip of host
    :param path: remote file path
    :param mode: file mode string, defaults to read-only
    :param port: ssh port
    :return: file-like object from the underlying SSH client
    """
    return self._get_remote(ip=ip, port=port).open(path, mode)

View File

@@ -311,7 +311,7 @@ def install_pkg_2(ip, pkg_name, port=22):
:return: exit code of installation
"""
ssh_manager = SSHManager()
remote_status = ssh_manager.execute_on_remote(
remote_status = ssh_manager.execute(
ip=ip,
port=port,
cmd="rpm -q '{0}'".format(pkg_name)
@@ -320,7 +320,7 @@ def install_pkg_2(ip, pkg_name, port=22):
logger.info("Package '{0}' already installed.".format(pkg_name))
else:
logger.info("Installing package '{0}' ...".format(pkg_name))
remote_status = ssh_manager.execute_on_remote(
remote_status = ssh_manager.execute(
ip=ip,
port=port,
cmd="yum -y install {0}".format(pkg_name)

View File

@@ -38,7 +38,6 @@ from fuelweb_test.helpers.fuel_actions import PostgresActions
from fuelweb_test.helpers.fuel_actions import NessusActions
from fuelweb_test.helpers.ntp import GroupNtpSync
from fuelweb_test.helpers.ssh_manager import SSHManager
from fuelweb_test.helpers.utils import run_on_remote
from fuelweb_test.helpers.utils import TimeStat
from fuelweb_test.helpers import multiple_networks_hacks
from fuelweb_test.models.fuel_web_client import FuelWebClient
@@ -372,35 +371,46 @@ class EnvironmentModel(object):
return True
def set_admin_ssh_password(self):
new_login = settings.SSH_CREDENTIALS['login']
new_password = settings.SSH_CREDENTIALS['password']
try:
with self.d_env.get_admin_remote(
login=settings.SSH_CREDENTIALS['login'],
password=settings.SSH_CREDENTIALS['password']) as remote:
self.execute_remote_cmd(remote, 'date')
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd='date'
)
logger.debug('Accessing admin node using SSH: SUCCESS')
except Exception:
logger.debug('Accessing admin node using SSH credentials:'
' FAIL, trying to change password from default')
with self.d_env.get_admin_remote(
login='root', password='r00tme') as remote:
self.execute_remote_cmd(
remote, 'echo -e "{1}\\n{1}" | passwd {0}'
.format(settings.SSH_CREDENTIALS['login'],
settings.SSH_CREDENTIALS['password']))
self.ssh_manager.update_connection(
ip=self.ssh_manager.admin_ip,
login='root',
password='r00tme'
)
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd='echo -e "{1}\\n{1}" | passwd {0}'.format(new_login,
new_password)
)
self.ssh_manager.update_connection(
ip=self.ssh_manager.admin_ip,
login=new_login,
password=new_password
)
logger.debug("Admin node password has changed.")
logger.info("Admin node login name: '{0}' , password: '{1}'".
format(settings.SSH_CREDENTIALS['login'],
settings.SSH_CREDENTIALS['password']))
format(new_login, new_password))
def set_admin_keystone_password(self):
try:
self.fuel_web.client.get_releases()
# TODO(akostrikov) CENTOS7 except exceptions.Unauthorized:
except:
with self.d_env.get_admin_remote() as remote:
self.execute_remote_cmd(
remote, 'fuel user --newpass {0} --change-password'
.format(settings.KEYSTONE_CREDS['password']))
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd='fuel user --newpass {0} --change-password'.format(
settings.KEYSTONE_CREDS['password'])
)
logger.info(
'New Fuel UI (keystone) username: "{0}", password: "{1}"'
.format(settings.KEYSTONE_CREDS['username'],
@@ -461,8 +471,11 @@ class EnvironmentModel(object):
"temporary-{0}\nbaseurl={1}/"
"\ngpgcheck=0\npriority="
"1' > {2}").format(i, url, conf_file)
with self.d_env.get_admin_remote() as remote:
remote.execute(cmd)
self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd=cmd
)
self.admin_install_updates()
if settings.MULTIPLE_NETWORKS:
self.describe_second_admin_interface()
@@ -479,11 +492,13 @@ class EnvironmentModel(object):
settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT
))
if settings.PATCHING_DISABLE_UPDATES:
with self.d_env.get_admin_remote() as remote:
cmd = "find /etc/yum.repos.d/ -type f -regextype posix-egrep" \
" -regex '.*/mos[0-9,\.]+\-(updates|security).repo' | " \
"xargs -n1 -i sed '$aenabled=0' -i {}"
self.execute_remote_cmd(remote, cmd)
cmd = "find /etc/yum.repos.d/ -type f -regextype posix-egrep" \
" -regex '.*/mos[0-9,\.]+\-(updates|security).repo' | " \
"xargs -n1 -i sed '$aenabled=0' -i {}"
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd=cmd
)
@update_rpm_packages
@upload_manifests
@@ -501,21 +516,30 @@ class EnvironmentModel(object):
@logwrap
def wait_for_external_config(self, timeout=120):
check_cmd = 'pkill -0 -f wait_for_external_config'
with self.d_env.get_admin_remote() as remote:
if MASTER_IS_CENTOS7:
remote.execute(check_cmd)
else:
wait(
lambda: remote.execute(check_cmd)['exit_code'] == 0,
timeout=timeout)
if MASTER_IS_CENTOS7:
self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd=check_cmd
)
else:
wait(
lambda: self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd=check_cmd)['exit_code'] == 0, timeout=timeout)
@logwrap
def kill_wait_for_external_config(self):
kill_cmd = 'pkill -f "^wait_for_external_config"'
check_cmd = 'pkill -0 -f "^wait_for_external_config"; [[ $? -eq 1 ]]'
with self.d_env.get_admin_remote() as remote:
run_on_remote(remote, kill_cmd)
run_on_remote(remote, check_cmd)
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd=kill_cmd
)
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd=check_cmd
)
@retry(count=3, delay=60)
def sync_time(self, nailgun_nodes=None):
@@ -573,8 +597,11 @@ class EnvironmentModel(object):
"--ifaces {iface} " \
"--repeat 3 " \
"--timeout 10".format(iface=iface)
with self.d_env.get_admin_remote() as admin_remote:
out = admin_remote.execute(command)['stdout']
out = self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd=command
)['stdout']
assert_true(self.get_admin_node_ip() in "".join(out),
"dhcpcheck doesn't discover master ip")
@@ -585,36 +612,46 @@ class EnvironmentModel(object):
logger.warning('Default image for bootstrap '
'is not based on Ubuntu!')
return
with self.d_env.get_admin_remote() as admin_remote:
cmd = 'fuel-bootstrap --quiet list'
bootstrap_images = run_on_remote(admin_remote, cmd)
assert_true(any('active' in line for line in bootstrap_images),
'Ubuntu bootstrap image wasn\'t built and activated! '
'See logs in /var/log/fuel-bootstrap-image-build.log '
'for details.')
bootstrap_images = self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd='fuel-bootstrap --quiet list'
)['stdout']
assert_true(any('active' in line for line in bootstrap_images),
'Ubuntu bootstrap image wasn\'t built and activated! '
'See logs in /var/log/fuel-bootstrap-image-build.log '
'for details.')
def admin_install_pkg(self, pkg_name):
"""Install a package <pkg_name> on the admin node"""
with self.d_env.get_admin_remote() as remote:
remote_status = remote.execute("rpm -q {0}'".format(pkg_name))
if remote_status['exit_code'] == 0:
logger.info("Package '{0}' already installed."
.format(pkg_name))
else:
logger.info("Installing package '{0}' ...".format(pkg_name))
remote_status = remote.execute("yum -y install {0}"
.format(pkg_name))
logger.info("Installation of the package '{0}' has been"
" completed with exit code {1}"
.format(pkg_name, remote_status['exit_code']))
remote_status = self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd="rpm -q {0}'".format(pkg_name)
)
if remote_status['exit_code'] == 0:
logger.info("Package '{0}' already installed.".format(pkg_name))
else:
logger.info("Installing package '{0}' ...".format(pkg_name))
remote_status = self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd="yum -y install {0}".format(pkg_name)
)
logger.info("Installation of the package '{0}' has been"
" completed with exit code {1}"
.format(pkg_name, remote_status['exit_code']))
return remote_status['exit_code']
def admin_run_service(self, service_name):
"""Start a service <service_name> on the admin node"""
with self.d_env.get_admin_remote() as admin_remote:
admin_remote.execute("service {0} start".format(service_name))
remote_status = admin_remote.execute("service {0} status"
.format(service_name))
self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd="service {0} start".format(service_name)
)
remote_status = self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd="service {0} status".format(service_name)
)
if any('running...' in status for status in remote_status['stdout']):
logger.info("Service '{0}' is running".format(service_name))
else:
@@ -630,8 +667,12 @@ class EnvironmentModel(object):
def admin_install_updates(self):
logger.info('Searching for updates..')
update_command = 'yum clean expire-cache; yum update -y'
with self.d_env.get_admin_remote() as admin_remote:
update_result = admin_remote.execute(update_command)
update_result = self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd=update_command
)
logger.info('Result of "{1}" command on master node: '
'{0}'.format(update_result, update_command))
assert_equal(int(update_result['exit_code']), 0,
@@ -655,8 +696,11 @@ class EnvironmentModel(object):
logger.info('{0} packet(s) were updated'.format(updates_count))
cmd = 'dockerctl destroy all; bootstrap_admin_node.sh;'
with self.d_env.get_admin_remote() as admin_remote:
result = admin_remote.execute(cmd)
result = self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd=cmd
)
logger.info('Result of "{1}" command on master node: '
'{0}'.format(result, cmd))
assert_equal(int(result['exit_code']), 0,
@@ -670,24 +714,28 @@ class EnvironmentModel(object):
def modify_resolv_conf(self, nameservers=None, merge=True):
if nameservers is None:
nameservers = []
with self.d_env.get_admin_remote() as remote:
resolv_conf = remote.execute('cat /etc/resolv.conf')
assert_equal(0, resolv_conf['exit_code'], 'Executing "{0}" on the '
'admin node has failed with: {1}'
.format('cat /etc/resolv.conf',
resolv_conf['stderr']))
if merge:
nameservers.extend(resolv_conf['stdout'])
resolv_keys = ['search', 'domain', 'nameserver']
resolv_new = "".join('{0}\n'.format(ns) for ns in nameservers
if any(x in ns for x in resolv_keys))
logger.debug('echo "{0}" > /etc/resolv.conf'.format(resolv_new))
echo_cmd = 'echo "{0}" > /etc/resolv.conf'.format(resolv_new)
echo_result = remote.execute(echo_cmd)
assert_equal(0, echo_result['exit_code'], 'Executing "{0}" on the '
'admin node has failed with: {1}'
.format(echo_cmd, echo_result['stderr']))
resolv_conf = self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd='cat /etc/resolv.conf'
)
assert_equal(0, resolv_conf['exit_code'],
'Executing "{0}" on the admin node has failed with: {1}'
.format('cat /etc/resolv.conf', resolv_conf['stderr']))
if merge:
nameservers.extend(resolv_conf['stdout'])
resolv_keys = ['search', 'domain', 'nameserver']
resolv_new = "".join('{0}\n'.format(ns) for ns in nameservers
if any(x in ns for x in resolv_keys))
logger.debug('echo "{0}" > /etc/resolv.conf'.format(resolv_new))
echo_cmd = 'echo "{0}" > /etc/resolv.conf'.format(resolv_new)
echo_result = self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd=echo_cmd
)
assert_equal(0, echo_result['exit_code'],
'Executing "{0}" on the admin node has failed with: {1}'
.format(echo_cmd, echo_result['stderr']))
return resolv_conf['stdout']
@logwrap
@@ -727,17 +775,27 @@ class EnvironmentModel(object):
add_second_admin_ip, second_admin_if, second_admin_ip)
logger.debug('Trying to assign {0} IP to the {1} on master node...'.
format(second_admin_ip, second_admin_if))
with self.d_env.get_admin_remote() as remote:
result = remote.execute(cmd)
result = self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd=cmd
)
assert_equal(result['exit_code'], 0, ('Failed to assign second admin '
'IP address on master node: {0}').format(result))
logger.debug('Done: {0}'.format(result['stdout']))
with self.d_env.get_admin_remote() as remote:
multiple_networks_hacks.configure_second_admin_dhcp(
remote, second_admin_if)
multiple_networks_hacks.configure_second_admin_firewall(
remote, second_admin_network, second_admin_netmask,
second_admin_if, self.get_admin_node_ip())
# TODO for ssh manager
multiple_networks_hacks.configure_second_admin_dhcp(
self.ssh_manager.admin_ip,
second_admin_if
)
multiple_networks_hacks.configure_second_admin_firewall(
self.ssh_manager.admin_ip,
second_admin_network,
second_admin_netmask,
second_admin_if,
self.get_admin_node_ip()
)
@logwrap
def get_masternode_uuid(self):

View File

@@ -269,15 +269,14 @@ class CommandLineTest(test_cli_base.CommandLine):
'Some slaves do not become online after revert!!'
' Expected {0} Actual {1}'.format(nodes, online_nodes))
res = self.ssh_manager.execute_on_remote(
self.ssh_manager.execute_on_remote(
ip=self.ssh_manager.admin_ip,
cmd='fuel --env {0} env delete'.format(cluster_id)
)
assert_true(res['exit_code'] == 0)
try:
wait(lambda:
self.ssh_manager.execute_on_remote(
self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd="fuel env | awk '{print $1}' | tail -n 1 | "
"grep '^.$'"