Use context manager for SSH sessions

Change-Id: I4dfd0918b1a9664e5c7a775688a059043b591969
Related-Bug: #1346362
This commit is contained in:
Maksym Strukov 2015-07-31 00:34:38 +03:00
parent 4e8761f8ea
commit 1b432a77a7
30 changed files with 1235 additions and 1055 deletions

View File

@ -92,8 +92,9 @@ def log_snapshot_after_test(func):
logger.error("Fetching of diagnostic snapshot failed: {0}".
format(traceback.format_exc()))
try:
admin_remote = args[0].env.d_env.get_admin_remote()
pull_out_logs_via_ssh(admin_remote, name)
with args[0].env.d_env.get_admin_remote()\
as admin_remote:
pull_out_logs_via_ssh(admin_remote, name)
except:
logger.error("Fetching of raw logs failed: {0}".
format(traceback.format_exc()))
@ -145,17 +146,17 @@ def upload_manifests(func):
logger.warning("Can't upload manifests: method of "
"unexpected class is decorated.")
return result
remote = environment.d_env.get_admin_remote()
remote.execute('rm -rf /etc/puppet/modules/*')
remote.upload(settings.UPLOAD_MANIFESTS_PATH,
'/etc/puppet/modules/')
logger.info("Copying new site.pp from %s" %
settings.SITEPP_FOR_UPLOAD)
remote.execute("cp %s /etc/puppet/manifests" %
settings.SITEPP_FOR_UPLOAD)
if settings.SYNC_DEPL_TASKS:
remote.execute("fuel release --sync-deployment-tasks"
" --dir /etc/puppet/")
with environment.d_env.get_admin_remote() as remote:
remote.execute('rm -rf /etc/puppet/modules/*')
remote.upload(settings.UPLOAD_MANIFESTS_PATH,
'/etc/puppet/modules/')
logger.info("Copying new site.pp from %s" %
settings.SITEPP_FOR_UPLOAD)
remote.execute("cp %s /etc/puppet/manifests" %
settings.SITEPP_FOR_UPLOAD)
if settings.SYNC_DEPL_TASKS:
remote.execute("fuel release --sync-deployment-tasks"
" --dir /etc/puppet/")
except Exception:
logger.error("Could not upload manifests")
raise
@ -176,8 +177,6 @@ def update_packages(func):
"unexpected class is decorated.")
return result
remote = environment.d_env.get_admin_remote()
if settings.UPDATE_FUEL_MIRROR:
for url in settings.UPDATE_FUEL_MIRROR:
repo_url = urlparse(url)
@ -210,17 +209,19 @@ def update_packages(func):
cmd = ("echo -e '[temporary]\nname=temporary\nbaseurl=file://{0}/"
"\ngpgcheck=0\npriority=1' > {1}").format(
settings.LOCAL_MIRROR_CENTOS, conf_file)
environment.execute_remote_cmd(remote, cmd, exit_code=0)
update_command = 'yum clean expire-cache; yum update -y -d3'
result = remote.execute(update_command)
logger.debug('Result of "yum update" command on master node: '
'{0}'.format(result))
assert_equal(int(result['exit_code']), 0,
'Packages update failed, '
'inspect logs for details')
environment.execute_remote_cmd(remote,
cmd='rm -f {0}'.format(conf_file),
exit_code=0)
with environment.d_env.get_admin_remote() as remote:
environment.execute_remote_cmd(remote, cmd, exit_code=0)
update_command = 'yum clean expire-cache; yum update -y -d3'
result = remote.execute(update_command)
logger.debug('Result of "yum update" command on master node: '
'{0}'.format(result))
assert_equal(int(result['exit_code']), 0,
'Packages update failed, '
'inspect logs for details')
environment.execute_remote_cmd(remote,
cmd='rm -f {0}'
.format(conf_file),
exit_code=0)
except Exception:
logger.error("Could not update packages")
raise
@ -247,7 +248,6 @@ def update_fuel(func):
centos_repo_path=settings.LOCAL_MIRROR_CENTOS,
ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU)
remote = environment.d_env.get_admin_remote()
cluster_id = environment.fuel_web.get_last_created_cluster()
if centos_files_count > 0:
@ -259,10 +259,11 @@ def update_fuel(func):
cmd='yum clean expire-cache; yum update -y')
environment.docker_actions.restart_containers()
# Update packages on master node
remote.execute(
'yum -y install yum-plugin-priorities;'
'yum clean expire-cache; yum update -y')
with environment.d_env.get_admin_remote() as remote:
# Update packages on master node
remote.execute(
'yum -y install yum-plugin-priorities;'
'yum clean expire-cache; yum update -y')
# Add auxiliary repository to the cluster attributes
if settings.OPENSTACK_RELEASE_UBUNTU not in \
@ -285,6 +286,7 @@ def update_fuel(func):
" because of deploying wrong release!"
.format(ubuntu_files_count))
if settings.SYNC_DEPL_TASKS:
with environment.d_env.get_admin_remote() as remote:
remote.execute("fuel release --sync-deployment-tasks"
" --dir /etc/puppet/")
return result
@ -318,19 +320,19 @@ def update_ostf(func):
raise ValueError('REFSPEC should be set for CI tests.')
logger.info("Uploading new patchset from {0}"
.format(settings.GERRIT_REFSPEC))
remote = args[0].environment.d_env.get_admin_remote()
remote.upload(settings.PATCH_PATH.rstrip('/'),
'/var/www/nailgun/fuel-ostf')
remote.execute('dockerctl shell ostf '
'bash -c "cd /var/www/nailgun/fuel-ostf; '
'python setup.py develop"')
remote.execute('dockerctl shell ostf '
'bash -c "supervisorctl restart ostf"')
helpers.wait(
lambda: "0" in
with args[0].environment.d_env.get_admin_remote() as remote:
remote.upload(settings.PATCH_PATH.rstrip('/'),
'/var/www/nailgun/fuel-ostf')
remote.execute('dockerctl shell ostf '
'bash -c "pgrep [o]stf; echo $?"')
['stdout'][1], timeout=60)
'bash -c "cd /var/www/nailgun/fuel-ostf; '
'python setup.py develop"')
remote.execute('dockerctl shell ostf '
'bash -c "supervisorctl restart ostf"')
helpers.wait(
lambda: "0" in
remote.execute('dockerctl shell ostf '
'bash -c "pgrep [o]stf; echo $?"')
['stdout'][1], timeout=60)
logger.info("OSTF status: RUNNING")
except Exception as e:
logger.error("Could not upload patch set {e}".format(e=e))
@ -372,22 +374,23 @@ def retry(count=3, delay=30):
def custom_repo(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
custom_pkgs = CustomRepo(args[0].environment.d_env.get_admin_remote())
try:
if settings.CUSTOM_PKGS_MIRROR:
custom_pkgs.prepare_repository()
with args[0].environment.d_env.get_admin_remote() as remote:
custom_pkgs = CustomRepo(remote)
try:
if settings.CUSTOM_PKGS_MIRROR:
custom_pkgs.prepare_repository()
except Exception:
logger.error("Unable to get custom packages from {0}\n{1}"
.format(settings.CUSTOM_PKGS_MIRROR,
traceback.format_exc()))
raise
except Exception:
logger.error("Unable to get custom packages from {0}\n{1}"
.format(settings.CUSTOM_PKGS_MIRROR,
traceback.format_exc()))
raise
try:
return func(*args, **kwargs)
except Exception:
custom_pkgs.check_puppet_logs()
raise
try:
return func(*args, **kwargs)
except Exception:
custom_pkgs.check_puppet_logs()
raise
return wrapper
@ -507,7 +510,7 @@ def check_repos_management(func):
nailgun_nodes = env.fuel_web.client.list_cluster_nodes(
env.fuel_web.get_last_created_cluster())
for n in nailgun_nodes:
check_repo_managment(
env.d_env.get_ssh_to_remote(n['ip']))
with env.d_env.get_ssh_to_remote(n['ip']) as node_ssh:
check_repo_managment(node_ssh)
return result
return wrapper

View File

@ -29,7 +29,6 @@ from fuelweb_test import logwrap
@logwrap
def configure_second_admin_cobbler(self):
dhcp_template = '/etc/cobbler/dnsmasq.template'
remote = self.d_env.get_admin_remote()
admin_net2 = self.d_env.admin_net2
second_admin_if = settings.INTERFACES.get(admin_net2)
second_admin_ip = str(
@ -53,14 +52,14 @@ def configure_second_admin_cobbler(self):
cmd = ("dockerctl shell cobbler sed -r '$a \{0}' -i {1};"
"dockerctl shell cobbler cobbler sync").format(new_range,
dhcp_template)
result = remote.execute(cmd)
with self.d_env.get_admin_remote() as remote:
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0, ('Failed to add second admin'
'network to cobbler: {0}').format(result))
@logwrap
def configure_second_admin_firewall(self, network, netmask):
remote = self.d_env.get_admin_remote()
# Allow input/forwarding for nodes from the second admin network
rules = [
('-I INPUT -i {0} -m comment --comment "input from 2nd admin network" '
@ -69,16 +68,17 @@ def configure_second_admin_firewall(self, network, netmask):
'"004 forward_admin_net2" -j MASQUERADE').
format(network, netmask)
]
for rule in rules:
cmd = 'iptables {0}'.format(rule)
with self.d_env.get_admin_remote() as remote:
for rule in rules:
cmd = 'iptables {0}'.format(rule)
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0,
('Failed to add firewall rule for second admin net'
'on master node: {0}, {1}').format(rule, result))
# Save new firewall configuration
cmd = 'service iptables save'
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0,
('Failed to add firewall rule for second admin net'
'on master node: {0}, {1}').format(rule, result))
# Save new firewall configuration
cmd = 'service iptables save'
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0,
('Failed to save firewall configuration on master node:'
' {0}').format(result))

View File

@ -269,10 +269,11 @@ def add_remote_repositories(environment, mirrors, prefix_name='custom_repo'):
name = '{0}_{1}'.format(prefix_name, mirrors.index(mir))
local_repo_path = '/'.join([settings.PATCHING_WEB_DIR, name])
remote_repo_url = mir
mirror_remote_repository(
admin_remote=environment.d_env.get_admin_remote(),
remote_repo_url=remote_repo_url,
local_repo_path=local_repo_path)
with environment.d_env.get_admin_remote() as remote:
mirror_remote_repository(
admin_remote=remote,
remote_repo_url=remote_repo_url,
local_repo_path=local_repo_path)
repositories.add(name)
return repositories
@ -303,9 +304,9 @@ def connect_slaves_to_repo(environment, nodes, repo_name):
]
for slave in nodes:
remote = environment.d_env.get_ssh_to_remote(slave['ip'])
for cmd in cmds:
environment.execute_remote_cmd(remote, cmd, exit_code=0)
with environment.d_env.get_ssh_to_remote(slave['ip']) as remote:
for cmd in cmds:
environment.execute_remote_cmd(remote, cmd, exit_code=0)
def connect_admin_to_repo(environment, repo_name):
@ -327,9 +328,9 @@ def connect_admin_to_repo(environment, repo_name):
"yum check-update; [[ $? -eq 100 || $? -eq 0 ]]"
]
remote = environment.d_env.get_admin_remote()
for cmd in cmds:
environment.execute_remote_cmd(remote, cmd, exit_code=0)
with environment.d_env.get_admin_remote() as remote:
for cmd in cmds:
environment.execute_remote_cmd(remote, cmd, exit_code=0)
def update_packages(environment, remote, packages, exclude_packages=None):
@ -358,8 +359,8 @@ def update_packages_on_slaves(environment, slaves, packages=None,
# Install all updates
packages = ' '
for slave in slaves:
remote = environment.d_env.get_ssh_to_remote(slave['ip'])
update_packages(environment, remote, packages, exclude_packages)
with environment.d_env.get_ssh_to_remote(slave['ip']) as remote:
update_packages(environment, remote, packages, exclude_packages)
def get_slaves_ips_by_role(slaves, role=None):
@ -502,6 +503,7 @@ def validate_fix_apply_step(apply_step, environment, slaves):
"command isn't specified".format(apply_step['id'],
apply_step['type']))
command = apply_step['command']
# remotes sessions .clear() placed in run_actions()
remotes = [environment.d_env.get_ssh_to_remote(ip) for ip in remotes_ips] \
if command else []
devops_nodes = devops_nodes if devops_action else []
@ -585,6 +587,10 @@ def run_actions(environment, target, slaves, action_type='patch-scenario'):
elif devops_action == 'reboot':
environment.fuel_web.warm_restart_nodes(devops_nodes)
# clear connections
for remote in remotes:
remote.clear()
def apply_patches(environment, target, slaves=None):
run_actions(environment, target, slaves, action_type='patch-scenario')

View File

@ -33,13 +33,13 @@ class SecurityChecks(object):
@logwrap
def _listen_random_port(self, ip_address, protocol, tmp_file_path):
remote = self.environment.d_env.get_ssh_to_remote(ip_address)
# Install socat
if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
cmd = '/usr/bin/apt-get install -y {pkg}'.format(pkg='socat')
else:
cmd = '/usr/bin/yum install -y {pkg}'.format(pkg='socat')
result = remote.execute(cmd)
with self.environment.d_env.get_ssh_to_remote(ip_address) as remote:
result = remote.execute(cmd)
if not result['exit_code'] == 0:
raise Exception('Could not install package: {0}\n{1}'.
format(result['stdout'], result['stderr']))
@ -47,8 +47,9 @@ class SecurityChecks(object):
cmd = ('netstat -A inet -ln --{proto} | awk \'$4 ~ /^({ip}'
'|0\.0\.0\.0):[0-9]+/ {{split($4,port,":"); print '
'port[2]}}\'').format(ip=ip_address, proto=protocol)
used_ports = [int(p.strip()) for p in remote.execute(cmd)['stdout']]
with self.environment.d_env.get_ssh_to_remote(ip_address) as remote:
used_ports = [int(p.strip())
for p in remote.execute(cmd)['stdout']]
# Get list of opened ports
cmd = ('iptables -t filter -S INPUT | sed -rn -e \'s/^.*\s\-p\s+'
@ -57,8 +58,9 @@ class SecurityChecks(object):
' while read ports; do if [[ "$ports" =~ [[:digit:]]'
'[[:blank:]][[:digit:]] ]]; then seq $ports; else echo '
'"$ports";fi; done').format(proto=protocol)
allowed_ports = [int(p.strip()) for p in remote.execute(cmd)['stdout']]
with self.environment.d_env.get_ssh_to_remote(ip_address) as remote:
allowed_ports = [int(p.strip())
for p in remote.execute(cmd)['stdout']]
test_port = randrange(10000)
while test_port in used_ports or test_port in allowed_ports:
@ -66,7 +68,8 @@ class SecurityChecks(object):
# Create dump of iptables rules
cmd = 'iptables-save > {0}.dump'.format(tmp_file_path)
result = remote.execute(cmd)
with self.environment.d_env.get_ssh_to_remote(ip_address) as remote:
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0,
'Dumping of iptables rules failed on {0}: {1}; {2}'.
format(ip_address, result['stdout'], result['stderr']))
@ -76,7 +79,9 @@ class SecurityChecks(object):
'&>/dev/null & pid=$! ; disown; sleep 1; kill -0 $pid').\
format(proto=protocol, ip=ip_address, file=tmp_file_path,
port=test_port)
result = remote.execute(cmd)
with self.environment.d_env.get_ssh_to_remote(ip_address) as remote:
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0,
'Listening on {0}:{1}/{2} port failed: {3}'.
format(ip_address, test_port, protocol,
@ -86,7 +91,6 @@ class SecurityChecks(object):
@retry()
@logwrap
def verify_firewall(self, cluster_id):
admin_remote = self.environment.d_env.get_admin_remote()
# Install NetCat
if not self.environment.admin_install_pkg('nc') == 0:
raise Exception('Can not install package "nc".')
@ -108,10 +112,12 @@ class SecurityChecks(object):
cmd = 'echo {string} | nc {opts} {ip} {port}'.\
format(opts=nc_opts, string=check_string, ip=node['ip'],
port=port)
admin_remote.execute(cmd)
remote = self.environment.d_env.get_ssh_to_remote(node['ip'])
cmd = 'cat {0}; mv {0}{{,.old}}'.format(tmp_file_path)
result = remote.execute(cmd)
with self.environment.d_env.get_admin_remote() as admin_remote:
admin_remote.execute(cmd)
with self.environment.d_env\
.get_ssh_to_remote(node['ip']) as remote:
cmd = 'cat {0}; mv {0}{{,.old}}'.format(tmp_file_path)
result = remote.execute(cmd)
if ''.join(result['stdout']).strip() == check_string:
msg = ('Firewall vulnerability detected. Unused port '
'{0}/{1} can be accessed on {2} (node-{3}) node. '

View File

@ -101,14 +101,14 @@ def store_astute_yaml(env):
nailgun_node = env.fuel_web.get_nailgun_node_by_devops_node(node)
if node.driver.node_active(node) and nailgun_node['roles']:
try:
_ip = env.fuel_web.get_nailgun_node_by_name(node.name)['ip']
remote = env.d_env.get_ssh_to_remote(_ip)
filename = '{0}/{1}-{2}.yaml'.format(settings.LOGS_DIR,
func_name, node.name)
logger.info("Storing {0}".format(filename))
if not remote.download('/etc/astute.yaml', filename):
logger.error("Downloading 'astute.yaml' from the node "
"{0} failed.".format(node.name))
_ip = env.fuel_web.get_nailgun_node_by_name(node.name)['ip']
with env.d_env.get_ssh_to_remote(_ip) as remote:
if not remote.download('/etc/astute.yaml', filename):
logger.error("Downloading 'astute.yaml' from the node "
"{0} failed.".format(node.name))
except Exception:
logger.error(traceback.format_exc())
@ -136,10 +136,10 @@ def store_packages_json(env):
packages = {func_name: {}}
cluster_id = env.fuel_web.get_last_created_cluster()
for nailgun_node in env.fuel_web.client.list_cluster_nodes(cluster_id):
remote = env.d_env.get_ssh_to_remote(nailgun_node['ip'])
role = '_'.join(nailgun_node['roles'])
logger.debug('role is {0}'.format(role))
packages = get_node_packages(remote, func_name, role, packages)
with env.d_env.get_ssh_to_remote(nailgun_node['ip']) as remote:
packages = get_node_packages(remote, func_name, role, packages)
packages_file = '{0}/packages.json'.format(settings.LOGS_DIR)
if os.path.isfile(packages_file):
with open(packages_file, 'r') as outfile:

View File

@ -303,33 +303,33 @@ class EnvironmentModel(object):
def set_admin_ssh_password(self):
try:
remote = self.d_env.get_admin_remote(
login=settings.SSH_CREDENTIALS['login'],
password=settings.SSH_CREDENTIALS['password'])
self.execute_remote_cmd(remote, 'date')
with self.d_env.get_admin_remote(
login=settings.SSH_CREDENTIALS['login'],
password=settings.SSH_CREDENTIALS['password']) as remote:
self.execute_remote_cmd(remote, 'date')
logger.debug('Accessing admin node using SSH: SUCCESS')
except Exception:
logger.debug('Accessing admin node using SSH credentials:'
' FAIL, trying to change password from default')
remote = self.d_env.get_admin_remote(
login='root', password='r00tme')
self.execute_remote_cmd(
remote, 'echo -e "{1}\\n{1}" | passwd {0}'
.format(settings.SSH_CREDENTIALS['login'],
settings.SSH_CREDENTIALS['password']))
with self.d_env.get_admin_remote(
login='root', password='r00tme') as remote:
self.execute_remote_cmd(
remote, 'echo -e "{1}\\n{1}" | passwd {0}'
.format(settings.SSH_CREDENTIALS['login'],
settings.SSH_CREDENTIALS['password']))
logger.debug("Admin node password has changed.")
logger.info("Admin node login name: '{0}' , password: '{1}'".
format(settings.SSH_CREDENTIALS['login'],
settings.SSH_CREDENTIALS['password']))
def set_admin_keystone_password(self):
remote = self.d_env.get_admin_remote()
try:
self.fuel_web.client.get_releases()
except exceptions.Unauthorized:
self.execute_remote_cmd(
remote, 'fuel user --newpass {0} --change-password'
.format(settings.KEYSTONE_CREDS['password']))
with self.d_env.get_admin_remote() as remote:
self.execute_remote_cmd(
remote, 'fuel user --newpass {0} --change-password'
.format(settings.KEYSTONE_CREDS['password']))
logger.info(
'New Fuel UI (keystone) username: "{0}", password: "{1}"'
.format(settings.KEYSTONE_CREDS['username'],
@ -396,11 +396,11 @@ class EnvironmentModel(object):
settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT
))
if settings.PATCHING_DISABLE_UPDATES:
remote = self.d_env.get_admin_remote()
cmd = "find /etc/yum.repos.d/ -type f -regextype posix-egrep" \
" -regex '.*/mos[0-9,\.]+\-(updates|security).repo' | " \
"xargs -n1 -i sed '$aenabled=0' -i {}"
self.execute_remote_cmd(remote, cmd)
with self.d_env.get_admin_remote() as remote:
cmd = "find /etc/yum.repos.d/ -type f -regextype posix-egrep" \
" -regex '.*/mos[0-9,\.]+\-(updates|security).repo' | " \
"xargs -n1 -i sed '$aenabled=0' -i {}"
self.execute_remote_cmd(remote, cmd)
@update_packages
@upload_manifests
@ -413,9 +413,9 @@ class EnvironmentModel(object):
def setup_customisation(self):
self.wait_for_provisioning()
try:
remote = self.d_env.get_admin_remote()
cmd = "pkill -sigusr1 -f '^.*/fuelmenu$'"
wait(lambda: remote.execute(cmd)['exit_code'] == 0, timeout=60)
with self.d_env.get_admin_remote() as remote:
wait(lambda: remote.execute(cmd)['exit_code'] == 0, timeout=60)
except Exception:
logger.error("Could not kill process of fuelmenu")
raise
@ -449,10 +449,8 @@ class EnvironmentModel(object):
def verify_network_configuration(self, node_name):
node = self.fuel_web.get_nailgun_node_by_name(node_name)
checkers.verify_network_configuration(
node=node,
remote=self.d_env.get_ssh_to_remote(node['ip'])
)
with self.fuel_web.get_ssh_for_node(node_name) as ssh:
checkers.verify_network_configuration(node=node, remote=ssh)
def wait_bootstrap(self):
logger.info("Waiting while bootstrapping is in progress")
@ -473,20 +471,24 @@ class EnvironmentModel(object):
raise Exception('Fuel node deployment failed.')
def dhcrelay_check(self):
admin_remote = self.d_env.get_admin_remote()
out = admin_remote.execute("dhcpcheck discover "
"--ifaces eth0 "
"--repeat 3 "
"--timeout 10")['stdout']
with self.d_env.get_admin_remote() as admin_remote:
out = admin_remote.execute("dhcpcheck discover "
"--ifaces eth0 "
"--repeat 3 "
"--timeout 10")['stdout']
assert_true(self.get_admin_node_ip() in "".join(out),
"dhcpcheck doesn't discover master ip")
def get_fuel_settings(self, remote=None):
if not remote:
remote = self.d_env.get_admin_remote()
cmd = 'cat {cfg_file}'.format(cfg_file=settings.FUEL_SETTINGS_YAML)
result = remote.execute(cmd)
if not remote:
with self.d_env.get_admin_remote() as remote:
remote.execute(cmd)
else:
result = remote.execute(cmd)
if result['exit_code'] == 0:
fuel_settings = yaml.load(''.join(result['stdout']))
else:
@ -497,25 +499,26 @@ class EnvironmentModel(object):
def admin_install_pkg(self, pkg_name):
"""Install a package <pkg_name> on the admin node"""
admin_remote = self.d_env.get_admin_remote()
remote_status = admin_remote.execute("rpm -q {0}'".format(pkg_name))
if remote_status['exit_code'] == 0:
logger.info("Package '{0}' already installed.".format(pkg_name))
else:
logger.info("Installing package '{0}' ...".format(pkg_name))
remote_status = admin_remote.execute("yum -y install {0}"
.format(pkg_name))
logger.info("Installation of the package '{0}' has been"
" completed with exit code {1}"
.format(pkg_name, remote_status['exit_code']))
with self.d_env.get_admin_remote() as remote:
remote_status = remote.execute("rpm -q {0}'".format(pkg_name))
if remote_status['exit_code'] == 0:
logger.info("Package '{0}' already installed."
.format(pkg_name))
else:
logger.info("Installing package '{0}' ...".format(pkg_name))
remote_status = remote.execute("yum -y install {0}"
.format(pkg_name))
logger.info("Installation of the package '{0}' has been"
" completed with exit code {1}"
.format(pkg_name, remote_status['exit_code']))
return remote_status['exit_code']
def admin_run_service(self, service_name):
"""Start a service <service_name> on the admin node"""
admin_remote = self.d_env.get_admin_remote()
admin_remote.execute("service {0} start".format(service_name))
remote_status = admin_remote.execute("service {0} status"
.format(service_name))
with self.d_env.get_admin_remote() as admin_remote:
admin_remote.execute("service {0} start".format(service_name))
remote_status = admin_remote.execute("service {0} status"
.format(service_name))
if any('running...' in status for status in remote_status['stdout']):
logger.info("Service '{0}' is running".format(service_name))
else:
@ -530,9 +533,9 @@ class EnvironmentModel(object):
# then `dockerctl destroy all; bootstrap_admin_node.sh;`
def admin_install_updates(self):
logger.info('Searching for updates..')
admin_remote = self.d_env.get_admin_remote()
update_command = 'yum clean expire-cache; yum update -y'
update_result = admin_remote.execute(update_command)
with self.d_env.get_admin_remote() as admin_remote:
update_result = admin_remote.execute(update_command)
logger.info('Result of "{1}" command on master node: '
'{0}'.format(update_result, update_command))
assert_equal(int(update_result['exit_code']), 0,
@ -556,7 +559,8 @@ class EnvironmentModel(object):
logger.info('{0} packet(s) were updated'.format(updates_count))
cmd = 'dockerctl destroy all; bootstrap_admin_node.sh;'
result = admin_remote.execute(cmd)
with self.d_env.get_admin_remote() as admin_remote:
result = admin_remote.execute(cmd)
logger.info('Result of "{1}" command on master node: '
'{0}'.format(result, cmd))
assert_equal(int(result['exit_code']), 0,
@ -568,23 +572,24 @@ class EnvironmentModel(object):
# * adds 'nameservers' at start of resolv.conf if merge=True
# * replaces resolv.conf with 'nameservers' if merge=False
def modify_resolv_conf(self, nameservers=[], merge=True):
remote = self.d_env.get_admin_remote()
resolv_conf = remote.execute('cat /etc/resolv.conf')
assert_equal(0, resolv_conf['exit_code'], 'Executing "{0}" on the '
'admin node has failed with: {1}'
.format('cat /etc/resolv.conf', resolv_conf['stderr']))
if merge:
nameservers.extend(resolv_conf['stdout'])
with self.d_env.get_admin_remote() as remote:
resolv_conf = remote.execute('cat /etc/resolv.conf')
assert_equal(0, resolv_conf['exit_code'], 'Executing "{0}" on the '
'admin node has failed with: {1}'
.format('cat /etc/resolv.conf',
resolv_conf['stderr']))
if merge:
nameservers.extend(resolv_conf['stdout'])
resolv_keys = ['search', 'domain', 'nameserver']
resolv_new = "".join('{0}\n'.format(ns) for ns in nameservers
if any(x in ns for x in resolv_keys))
logger.debug('echo "{0}" > /etc/resolv.conf'.format(resolv_new))
echo_cmd = 'echo "{0}" > /etc/resolv.conf'.format(resolv_new)
echo_result = remote.execute(echo_cmd)
assert_equal(0, echo_result['exit_code'], 'Executing "{0}" on the '
'admin node has failed with: {1}'
.format(echo_cmd, echo_result['stderr']))
resolv_keys = ['search', 'domain', 'nameserver']
resolv_new = "".join('{0}\n'.format(ns) for ns in nameservers
if any(x in ns for x in resolv_keys))
logger.debug('echo "{0}" > /etc/resolv.conf'.format(resolv_new))
echo_cmd = 'echo "{0}" > /etc/resolv.conf'.format(resolv_new)
echo_result = remote.execute(echo_cmd)
assert_equal(0, echo_result['exit_code'], 'Executing "{0}" on the '
'admin node has failed with: {1}'
.format(echo_cmd, echo_result['stderr']))
return resolv_conf['stdout']
@logwrap
@ -597,7 +602,6 @@ class EnvironmentModel(object):
@logwrap
def describe_second_admin_interface(self):
remote = self.d_env.get_admin_remote()
admin_net2_object = self.d_env.get_network(name=self.d_env.admin_net2)
second_admin_network = admin_net2_object.ip.network
second_admin_netmask = admin_net2_object.ip.netmask
@ -625,7 +629,8 @@ class EnvironmentModel(object):
add_second_admin_ip, second_admin_if, second_admin_ip)
logger.debug('Trying to assign {0} IP to the {1} on master node...'.
format(second_admin_ip, second_admin_if))
result = remote.execute(cmd)
with self.d_env.get_admin_remote() as remote:
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0, ('Failed to assign second admin '
'IP address on master node: {0}').format(result))
logger.debug('Done: {0}'.format(result['stdout']))

View File

@ -342,8 +342,8 @@ class FuelWebClient(object):
@logwrap
def get_pcm_nodes(self, ctrl_node, pure=False):
nodes = {}
remote = self.get_ssh_for_node(ctrl_node)
pcs_status = remote.execute('pcs status nodes')['stdout']
with self.get_ssh_for_node(ctrl_node) as remote:
pcs_status = remote.execute('pcs status nodes')['stdout']
pcm_nodes = yaml.load(''.join(pcs_status).strip())
for status in ('Online', 'Offline', 'Standby'):
list_nodes = (pcm_nodes['Pacemaker Nodes'][status] or '').split()
@ -356,9 +356,9 @@ class FuelWebClient(object):
@logwrap
def get_rabbit_running_nodes(self, ctrl_node):
remote = self.get_ssh_for_node(ctrl_node)
rabbit_status = ''.join(remote.execute(
'rabbitmqctl cluster_status')['stdout']).strip()
with self.get_ssh_for_node(ctrl_node) as remote:
rabbit_status = ''.join(remote.execute(
'rabbitmqctl cluster_status')['stdout']).strip()
rabbit_nodes = re.search(
"\{running_nodes,\[(.*)\]\}",
rabbit_status).group(1).replace("'", "").split(',')
@ -466,13 +466,17 @@ class FuelWebClient(object):
public_gw = self.environment.d_env.router(router_name="public")
if help_data.FUEL_USE_LOCAL_NTPD and ('ntp_list' not in settings)\
remote = self.environment.d_env.get_admin_remote()
if help_data.FUEL_USE_LOCAL_NTPD\
and ('ntp_list' not in settings)\
and checkers.is_ntpd_active(
self.environment.d_env.get_admin_remote(), public_gw):
remote, public_gw):
attributes['editable']['external_ntp']['ntp_list']['value'] =\
public_gw
logger.info("Configuring cluster #{0} to use NTP server {1}"
logger.info("Configuring cluster #{0}"
"to use NTP server {1}"
.format(cluster_id, public_gw))
remote.clear()
if help_data.FUEL_USE_LOCAL_DNS and ('dns_list' not in settings):
attributes['editable']['external_dns']['dns_list']['value'] =\
@ -698,29 +702,29 @@ class FuelWebClient(object):
@logwrap
def get_cluster_floating_list(self, node_name):
logger.info('Get floating IPs list at %s devops node', node_name)
remote = self.get_ssh_for_node(node_name)
ret = remote.check_call('/usr/bin/nova-manage floating list')
with self.get_ssh_for_node(node_name) as remote:
ret = remote.check_call('/usr/bin/nova-manage floating list')
ret_str = ''.join(ret['stdout'])
return re.findall('(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})', ret_str)
@logwrap
def get_cluster_block_devices(self, node_name):
logger.info('Get %s node block devices (lsblk)', node_name)
remote = self.get_ssh_for_node(node_name)
ret = remote.check_call('/bin/lsblk')
with self.get_ssh_for_node(node_name) as remote:
ret = remote.check_call('/bin/lsblk')
return ''.join(ret['stdout'])
@logwrap
def get_pacemaker_status(self, controller_node_name):
logger.info('Get pacemaker status at %s node', controller_node_name)
remote = self.get_ssh_for_node(controller_node_name)
return ''.join(remote.check_call('crm_mon -1')['stdout'])
with self.get_ssh_for_node(controller_node_name) as remote:
return ''.join(remote.check_call('crm_mon -1')['stdout'])
@logwrap
def get_pacemaker_config(self, controller_node_name):
logger.info('Get pacemaker config at %s node', controller_node_name)
remote = self.get_ssh_for_node(controller_node_name)
return ''.join(remote.check_call('crm_resource --list')['stdout'])
with self.get_ssh_for_node(controller_node_name) as remote:
return ''.join(remote.check_call('crm_resource --list')['stdout'])
@logwrap
def get_pacemaker_resource_location(self, controller_node_name,
@ -728,14 +732,14 @@ class FuelWebClient(object):
"""Get devops nodes where the resource is running."""
logger.info('Get pacemaker resource %s life status at %s node',
resource_name, controller_node_name)
remote = self.get_ssh_for_node(controller_node_name)
hosts = []
for line in remote.check_call(
'crm_resource --resource {0} '
'--locate --quiet'.format(resource_name))['stdout']:
hosts.append(
self.get_devops_node_by_nailgun_fqdn(line.strip()))
remote.clear()
with self.get_ssh_for_node(controller_node_name) as remote:
for line in remote.check_call(
'crm_resource --resource {0} '
'--locate --quiet'.format(resource_name))['stdout']:
hosts.append(
self.get_devops_node_by_nailgun_fqdn(line.strip()))
return hosts
@logwrap
@ -1402,8 +1406,8 @@ class FuelWebClient(object):
[n.name for n in devops_nodes])
for node in devops_nodes:
logger.debug('Shutdown node %s', node.name)
remote = self.get_ssh_for_node(node.name)
remote.check_call('/sbin/shutdown -Ph now')
with self.get_ssh_for_node(node.name) as remote:
remote.check_call('/sbin/shutdown -Ph now')
for node in devops_nodes:
logger.info('Wait a %s node offline status', node.name)
@ -1480,15 +1484,15 @@ class FuelWebClient(object):
:rtype: String on None
"""
try:
remote = self.get_ssh_for_node(node_name)
if namespace:
cmd = 'ip netns exec {0} ip -4 ' \
'-o address show {1}'.format(namespace, interface)
else:
cmd = 'ip -4 -o address show {1}'.format(interface)
ret = remote.check_call(cmd)
remote.clear()
with self.get_ssh_for_node(node_name) as remote:
ret = remote.check_call(cmd)
ip_search = re.search(
'inet (?P<ip>\d+\.\d+\.\d+.\d+/\d+).*scope .* '
'{0}'.format(interface), ' '.join(ret['stdout']))
@ -1508,11 +1512,10 @@ class FuelWebClient(object):
def ip_address_del(self, node_name, namespace, interface, ip):
logger.info('Delete %s ip address of %s interface at %s node',
ip, interface, node_name)
remote = self.get_ssh_for_node(node_name)
remote.check_call(
'ip netns exec {0} ip addr'
' del {1} dev {2}'.format(namespace, ip, interface))
remote.clear()
with self.get_ssh_for_node(node_name) as remote:
remote.check_call(
'ip netns exec {0} ip addr'
' del {1} dev {2}'.format(namespace, ip, interface))
@logwrap
def provisioning_cluster_wait(self, cluster_id, progress=None):
@ -1568,35 +1571,34 @@ class FuelWebClient(object):
return ''.join(result['stderr']).strip()
for node_name in node_names:
_ip = self.get_nailgun_node_by_name(node_name)['ip']
remote = self.environment.d_env.get_ssh_to_remote(_ip)
try:
wait(lambda: _get_galera_status(remote) == 'ON',
timeout=timeout)
logger.info("MySQL Galera is up on {host} node.".format(
host=node_name))
except TimeoutError:
logger.error("MySQL Galera isn't ready on {0}: {1}"
.format(node_name, _get_galera_status(remote)))
raise TimeoutError(
"MySQL Galera isn't ready on {0}: {1}".format(
node_name, _get_galera_status(remote)))
with self.get_ssh_for_node(node_name) as remote:
try:
wait(lambda: _get_galera_status(remote) == 'ON',
timeout=timeout)
logger.info("MySQL Galera is up on {host} node.".format(
host=node_name))
except TimeoutError:
logger.error("MySQL Galera isn't ready on {0}: {1}"
.format(node_name,
_get_galera_status(remote)))
raise TimeoutError(
"MySQL Galera isn't ready on {0}: {1}".format(
node_name, _get_galera_status(remote)))
return True
@logwrap
def wait_cinder_is_up(self, node_names):
    """Wait until Cinder services report ready on every given node.

    :param node_names: iterable of devops node names to check
    :returns: True when all nodes pass the check
    :raises TimeoutError: if Cinder services on any node stay down
        for 300 seconds
    """
    logger.info("Waiting for all Cinder services up.")
    for node_name in node_names:
        # Check each node once, over a context-managed SSH session so
        # the connection is closed even when the wait times out.
        with self.get_ssh_for_node(node_name) as remote:
            try:
                wait(lambda: checkers.check_cinder_status(remote),
                     timeout=300)
                logger.info("All Cinder services up.")
            except TimeoutError:
                logger.error("Cinder services not ready.")
                raise TimeoutError(
                    "Cinder services not ready. ")
    return True
def run_ostf_repeatably(self, cluster_id, test_name=None,
@ -1703,43 +1705,44 @@ class FuelWebClient(object):
logger.info('Waiting until Ceph service become up...')
for node in online_ceph_nodes:
remote = self.environment.d_env.get_ssh_to_remote(node['ip'])
try:
wait(lambda: ceph.check_service_ready(remote) is True,
interval=20, timeout=600)
except TimeoutError:
error_msg = 'Ceph service is not properly started' \
' on {0}'.format(node['name'])
logger.error(error_msg)
raise TimeoutError(error_msg)
with self.environment.d_env\
.get_ssh_to_remote(node['ip']) as remote:
try:
wait(lambda: ceph.check_service_ready(remote) is True,
interval=20, timeout=600)
except TimeoutError:
error_msg = 'Ceph service is not properly started' \
' on {0}'.format(node['name'])
logger.error(error_msg)
raise TimeoutError(error_msg)
logger.info('Ceph service is ready. Checking Ceph Health...')
self.check_ceph_time_skew(cluster_id, offline_nodes)
node = online_ceph_nodes[0]
remote = self.environment.d_env.get_ssh_to_remote(node['ip'])
if not ceph.is_health_ok(remote):
if ceph.is_pgs_recovering(remote) and len(offline_nodes) > 0:
logger.info('Ceph is being recovered after osd node(s)'
' shutdown.')
try:
wait(lambda: ceph.is_health_ok(remote),
interval=30, timeout=recovery_timeout)
except TimeoutError:
result = ceph.health_detail(remote)
msg = 'Ceph HEALTH is not OK on {0}. Details: {1}'.format(
node['name'], result)
logger.error(msg)
raise TimeoutError(msg)
else:
result = ceph.health_detail(remote)
msg = 'Ceph HEALTH is not OK on {0}. Details: {1}'.format(
node['name'], result)
assert_true(ceph.is_health_ok(remote), msg)
with self.environment.d_env.get_ssh_to_remote(node['ip']) as remote:
if not ceph.is_health_ok(remote):
if ceph.is_pgs_recovering(remote) and len(offline_nodes) > 0:
logger.info('Ceph is being recovered after osd node(s)'
' shutdown.')
try:
wait(lambda: ceph.is_health_ok(remote),
interval=30, timeout=recovery_timeout)
except TimeoutError:
result = ceph.health_detail(remote)
msg = 'Ceph HEALTH is not OK on {0}. Details: {1}'\
.format(node['name'], result)
logger.error(msg)
raise TimeoutError(msg)
else:
result = ceph.health_detail(remote)
msg = 'Ceph HEALTH is not OK on {0}. Details: {1}'.format(
node['name'], result)
assert_true(ceph.is_health_ok(remote), msg)
logger.info('Checking Ceph OSD Tree...')
ceph.check_disks(remote, [n['id'] for n in online_ceph_nodes])
logger.info('Checking Ceph OSD Tree...')
ceph.check_disks(remote, [n['id'] for n in online_ceph_nodes])
remote.clear()
logger.info('Ceph cluster status is OK')
@logwrap
@ -2120,17 +2123,17 @@ class FuelWebClient(object):
cluster_id,
roles=['controller', ]) if node['online']]
admin_remote = self.environment.d_env.get_admin_remote()
check_proxy_cmd = ('[[ $(curl -s -w "%{{http_code}}" '
'{0} -o /dev/null) -eq 200 ]]')
with self.environment.d_env.get_admin_remote() as admin_remote:
check_proxy_cmd = ('[[ $(curl -s -w "%{{http_code}}" '
'{0} -o /dev/null) -eq 200 ]]')
for controller in online_controllers:
proxy_url = 'http://{0}:{1}/'.format(controller['ip'], port)
logger.debug('Trying to connect to {0} from master node...'.format(
proxy_url))
if admin_remote.execute(
check_proxy_cmd.format(proxy_url))['exit_code'] == 0:
return proxy_url
for controller in online_controllers:
proxy_url = 'http://{0}:{1}/'.format(controller['ip'], port)
logger.debug('Trying to connect to {0} from master node...'
.format(proxy_url))
if admin_remote.execute(
check_proxy_cmd.format(proxy_url))['exit_code'] == 0:
return proxy_url
assert_true(len(online_controllers) > 0,
'There are no online controllers available '

View File

@ -203,72 +203,80 @@ class NeutronTun(TestBasic):
# check hiera
if self.get_post_test(tasks, 'hiera'):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(tasks, 'hiera')[0]['cmd'])
for node in nodes]
for node in nodes:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=self.get_post_test(tasks, 'hiera')[0]['cmd'])
# check globals
if self.get_post_test(tasks, 'globals'):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(tasks, 'globals')[0]['cmd'])
for node in nodes]
for node in nodes:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=self.get_post_test(tasks, 'globals')[0]['cmd'])
# check netconfig
if self.get_post_test(tasks, 'netconfig'):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(tasks, 'netconfig')[0]['cmd'])
for node in nodes]
for node in nodes:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=self.get_post_test(tasks, 'netconfig')[0]['cmd'])
# check firewall
if self.get_post_test(all_tasks, 'firewall'):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(all_tasks, 'firewall')[0]['cmd'])
for node in nodes]
for node in nodes:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=self.get_post_test(all_tasks,
'firewall')[0]['cmd'])
# check hosts
if self.get_post_test(tasks, 'hosts'):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(tasks, 'hosts')[0]['cmd'])
for node in nodes]
for node in nodes:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=self.get_post_test(tasks, 'hosts')[0]['cmd'])
# check tools
if self.get_post_test(all_tasks, 'tools'):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(all_tasks, 'tools')[0]['cmd'])
for node in nodes]
for node in nodes:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=self.get_post_test(all_tasks, 'tools')[0]['cmd'])
# check cluster on controller
post_cluster = self.get_post_test(all_tasks, 'cluster')
if post_cluster:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=post_cluster[0]['cmd'])
for node in ['slave-01']]
for node in ['slave-01']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=post_cluster[0]['cmd'])
post_virtual_ips = self.get_post_test(tasks, 'virtual_ips')
if post_virtual_ips:
try:
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node('slave-01'),
path=post_virtual_ips[0]['cmd'])
except AssertionError:
import time
time.sleep(60)
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node('slave-01'),
path=post_virtual_ips[0]['cmd'])
with self.fuel_web.get_ssh_for_node('slave-01') as ssh:
try:
gd.run_check_from_task(
remote=ssh,
path=post_virtual_ips[0]['cmd'])
except AssertionError:
import time
time.sleep(60)
gd.run_check_from_task(
remote=ssh,
path=post_virtual_ips[0]['cmd'])
self.env.make_snapshot("run_tasks_end_with_vips")
@ -309,10 +317,11 @@ class NeutronTun(TestBasic):
pre_cluster_haproxy = self.get_pre_test(tasks, 'cluster-haproxy')
post_cluster_haproxy = self.get_post_test(tasks, 'cluster-haproxy')
if pre_cluster_haproxy:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_cluster_haproxy[0]['cmd'])
for node in ['slave-01']]
for node in ['slave-01']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=pre_cluster_haproxy[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['conntrackd'],
@ -327,10 +336,11 @@ class NeutronTun(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_cluster_haproxy:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=post_cluster_haproxy[0]['cmd'])
for node in ['slave-01']]
for node in ['slave-01']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=post_cluster_haproxy[0]['cmd'])
self.env.make_snapshot("step_5_run_cluster_haproxy_controller")
@ -371,10 +381,11 @@ class NeutronTun(TestBasic):
pre_openstack_haproxy = self.get_pre_test(tasks, 'openstack-haproxy')
post_openstack_haproxy = self.get_post_test(tasks, 'openstack-haproxy')
if pre_openstack_haproxy:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_openstack_haproxy[0]['cmd'])
for node in ['slave-01']]
for node in ['slave-01']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=pre_openstack_haproxy[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['openstack-haproxy'],
@ -400,19 +411,21 @@ class NeutronTun(TestBasic):
logger.debug('res info is {0}'.format(res))
self.fuel_web.assert_task_success(task=res)
if post_openstack_haproxy:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=post_openstack_haproxy[0]['cmd'])
for node in ['slave-01']]
for node in ['slave-01']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=post_openstack_haproxy[0]['cmd'])
for service in ['memcached', 'openstack-cinder', 'database'
'rabbitmq', 'apache']:
# for service in ['memcached', 'openstack-cinder', 'database'
# 'rabbitmq', 'keystone', 'glance']:
if self.get_post_test(tasks, service):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(tasks, service)[0]['cmd'])
for node in ['slave-01']]
for node in ['slave-01']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=self.get_post_test(tasks, service)[0]['cmd'])
self.env.make_snapshot("step_6_run_openstack_haproxy_controller")
@ -454,10 +467,11 @@ class NeutronTun(TestBasic):
pre_openstack_ctr = self.get_pre_test(tasks, 'openstack-controller')
post_openstack_ctr = self.get_post_test(tasks, 'openstack-controller')
if pre_openstack_ctr:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_openstack_ctr[0]['cmd'])
for node in ['slave-01']]
for node in ['slave-01']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=pre_openstack_ctr[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['openstack-controller'],
@ -466,10 +480,11 @@ class NeutronTun(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_openstack_ctr:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=post_openstack_ctr[0]['cmd'])
for node in ['slave-01']]
for node in ['slave-01']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=post_openstack_ctr[0]['cmd'])
self.env.make_snapshot("step_7_run_openstack_controller")
@ -517,10 +532,11 @@ class NeutronTun(TestBasic):
pre_net = self.get_pre_test(tasks, 'openstack-network')
if pre_net:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_net[0]['cmd'])
for node in ['slave-01']]
for node in ['slave-01']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=pre_net[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=[task['id'] for task in tasks],
@ -548,10 +564,11 @@ class NeutronTun(TestBasic):
for task in expected_task_list:
if self.get_post_test(tasks, task):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(tasks, task)[0]['cmd'])
for node in ['slave-01']]
for node in ['slave-01']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=self.get_post_test(tasks, task)[0]['cmd'])
try:
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['sanity'],
@ -601,10 +618,11 @@ class NeutronTun(TestBasic):
pre_top_compute = self.get_pre_test(tasks, 'top-role-compute')
post_top_compute = self.get_post_test(tasks, 'top-role-compute')
if pre_top_compute:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_top_compute[0]['cmd'])
for node in ['slave-02']]
for node in ['slave-02']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=pre_top_compute[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['top-role-compute'],
@ -613,18 +631,20 @@ class NeutronTun(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_top_compute:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=post_top_compute[0]['cmd'])
for node in ['slave-02']]
for node in ['slave-02']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=post_top_compute[0]['cmd'])
pre_net = self.get_pre_test(tasks, 'openstack-network-compute')
post_net = self.get_post_test(tasks, 'openstack-network-compute')
if pre_net:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_net[0]['cmd'])
for node in ['slave-02', 'slave-03']]
for node in ['slave-02', 'slave-03']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=pre_net[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['openstack-network-compute'],
@ -634,10 +654,11 @@ class NeutronTun(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_net:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=post_net[0]['cmd'])
for node in ['slave-02']]
for node in ['slave-02']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=post_net[0]['cmd'])
self.env.make_snapshot("step_9_run_top_role_compute")
@ -682,10 +703,11 @@ class NeutronTun(TestBasic):
pre_top_cinder = self.get_pre_test(tasks, 'top-role-cinder')
post_top_cinder = self.get_post_test(tasks, 'top-role-cinder')
if pre_top_cinder:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_top_cinder[0]['cmd'])
for node in ['slave-03']]
for node in ['slave-03']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=pre_top_cinder[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['top-role-cinder'],
@ -694,10 +716,11 @@ class NeutronTun(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_top_cinder:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=post_top_cinder[0]['cmd'])
for node in ['slave-03']]
for node in ['slave-03']:
with self.fuel_web.get_ssh_for_node(node) as ssh:
gd.run_check_from_task(
remote=ssh,
path=post_top_cinder[0]['cmd'])
# Run post_deployment
tasks = self.fuel_web.client.get_end_deployment_tasks(

View File

@ -196,50 +196,57 @@ class NeutronVlanCephMongo(TestBasic):
# check hiera
if self.get_post_test(tasks, 'hiera'):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(tasks, 'hiera')[0]['cmd'])
for node in nodes]
for node in nodes:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=self.get_post_test(tasks, 'hiera')[0]['cmd'])
# check globals
if self.get_post_test(tasks, 'globals'):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(tasks, 'globals')[0]['cmd'])
for node in nodes]
for node in nodes:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=self.get_post_test(tasks, 'globals')[0]['cmd'])
# check netcondfig
if self.get_post_test(tasks, 'netconfig'):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(tasks, 'netconfig')[0]['cmd'])
for node in nodes]
for node in nodes:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=self.get_post_test(tasks, 'netconfig')[0]['cmd'])
# check firewall
if self.get_post_test(all_tasks, 'firewall'):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(all_tasks, 'firewall')[0]['cmd'])
for node in nodes]
for node in nodes:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=self.get_post_test(all_tasks,
'firewall')[0]['cmd'])
# check hosts
if self.get_post_test(tasks, 'hosts'):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(tasks, 'hosts')[0]['cmd'])
for node in nodes]
for node in nodes:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=self.get_post_test(tasks, 'hosts')[0]['cmd'])
# check tools
if self.get_post_test(all_tasks, 'tools'):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(all_tasks, 'tools')[0]['cmd'])
for node in nodes]
for node in nodes:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=self.get_post_test(all_tasks, 'tools')[0]['cmd'])
self.env.make_snapshot('step_2_run_tasks_end_with_hosts')
@ -298,10 +305,12 @@ class NeutronVlanCephMongo(TestBasic):
# check mongo
if self.get_post_test(tasks, 'top-role-mongo'):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(tasks, 'tope-role-mongo')[0]['cmd'])
for node in mongo_nodes]
for node in mongo_nodes:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=self.get_post_test(tasks,
'tope-role-mongo')[0]['cmd'])
self.env.make_snapshot('step_3_run_top_role_mongo_single')
@ -356,10 +365,11 @@ class NeutronVlanCephMongo(TestBasic):
self.fuel_web.assert_task_success(task)
if self.get_post_test(tasks, 'top-role-primary-mongo'):
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(primary_mongo.name),
path=self.get_post_test(
tasks, 'top-role-primary-mongo')[0]['cmd'])
with self.fuel_web.get_ssh_for_node(primary_mongo.name) as remote:
gd.run_check_from_task(
remote=remote,
path=self.get_post_test(
tasks, 'top-role-primary-mongo')[0]['cmd'])
self.env.make_snapshot('step_4_run_top_role_primary_mongo_single')
@ -403,9 +413,11 @@ class NeutronVlanCephMongo(TestBasic):
pre_cluster = self.get_pre_test(tasks, 'cluster')
post_cluster = self.get_post_test(tasks, 'cluster')
if pre_cluster:
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(primary_controller.name),
path=pre_cluster[0]['cmd'])
with self.fuel_web.get_ssh_for_node(
primary_controller.name) as remote:
gd.run_check_from_task(
remote=remote,
path=pre_cluster[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['cluster'],
@ -413,9 +425,12 @@ class NeutronVlanCephMongo(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_cluster:
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(primary_controller.name),
path=post_cluster[0]['cmd'])
with self.fuel_web\
.get_ssh_for_node(primary_controller.name) as remote:
gd.run_check_from_task(
remote=remote,
path=post_cluster[0]['cmd'])
self.env.make_snapshot("step_5_run_cluster_primary_controller")
@test(depends_on=[step_5_run_cluster_primary_controller],
@ -458,10 +473,11 @@ class NeutronVlanCephMongo(TestBasic):
pre_virtual_ips = self.get_pre_test(tasks, 'virtual_ips')
post_virtual_ips = self.get_post_test(tasks, 'virtual_ips')
if pre_virtual_ips:
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(
primary_controller.name),
path=pre_virtual_ips[0]['cmd'])
with self.fuel_web\
.get_ssh_for_node(primary_controller.name) as remote:
gd.run_check_from_task(
remote=remote,
path=pre_virtual_ips[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['virtual_ips'],
@ -469,10 +485,11 @@ class NeutronVlanCephMongo(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_virtual_ips:
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(
primary_controller.name),
path=post_virtual_ips[0]['cmd'])
with self.fuel_web\
.get_ssh_for_node(primary_controller.name) as remote:
gd.run_check_from_task(
remote=remote,
path=post_virtual_ips[0]['cmd'])
self.env.make_snapshot('step_6_run_virtual_ips_primary_controller')
@ -519,10 +536,11 @@ class NeutronVlanCephMongo(TestBasic):
post_cluster_haproxy = self.get_post_test(tasks, 'cluster-haproxy')
if pre_cluster_haproxy:
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(
primary_controller.name),
path=pre_cluster_haproxy[0]['cmd'])
with self.fuel_web\
.get_ssh_for_node(primary_controller.name) as remote:
gd.run_check_from_task(
remote=remote,
path=pre_cluster_haproxy[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['cluster-haproxy'],
@ -532,10 +550,11 @@ class NeutronVlanCephMongo(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_cluster_haproxy:
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(
primary_controller.name),
path=post_cluster_haproxy[0]['cmd'])
with self.fuel_web\
.get_ssh_for_node(primary_controller.name) as remote:
gd.run_check_from_task(
remote=remote,
path=post_cluster_haproxy[0]['cmd'])
self.env.make_snapshot(
"step_7_run_cluster_haproxy_primary_controller")
@ -581,10 +600,11 @@ class NeutronVlanCephMongo(TestBasic):
post_openstack_haproxy = self.get_post_test(tasks, 'openstack-haproxy')
if pre_openstack_haproxy:
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(
primary_controller.name),
path=pre_openstack_haproxy[0]['cmd'])
with self.fuel_web\
.get_ssh_for_node(primary_controller.name) as remote:
gd.run_check_from_task(
remote=remote,
path=pre_openstack_haproxy[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['openstack-haproxy'],
@ -594,10 +614,11 @@ class NeutronVlanCephMongo(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_openstack_haproxy:
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(
primary_controller.name),
path=post_openstack_haproxy[0]['cmd'])
with self.fuel_web\
.get_ssh_for_node(primary_controller.name) as remote:
gd.run_check_from_task(
remote=remote,
path=post_openstack_haproxy[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['dns-server'],
@ -623,10 +644,11 @@ class NeutronVlanCephMongo(TestBasic):
for service in ['memcached', 'openstack-cinder', 'database'
'rabbitmq', 'keystone', 'glance']:
if self.get_post_test(tasks, service):
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(
primary_controller.name),
path=self.get_post_test(tasks, service)[0]['cmd'])
with self.fuel_web\
.get_ssh_for_node(primary_controller.name) as remote:
gd.run_check_from_task(
remote=remote,
path=self.get_post_test(tasks, service)[0]['cmd'])
self.env.make_snapshot(
"step_8_run_openstack_haproxy_primary_controller")
@ -674,10 +696,11 @@ class NeutronVlanCephMongo(TestBasic):
pre_openstack_ctr = self.get_pre_test(tasks, 'openstack-controller')
post_openstack_ctr = self.get_post_test(tasks, 'openstack-controller')
if pre_openstack_ctr:
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(
primary_controller.name),
path=pre_openstack_ctr[0]['cmd'])
with self.fuel_web\
.get_ssh_for_node(primary_controller.name) as remote:
gd.run_check_from_task(
remote=remote,
path=pre_openstack_ctr[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['openstack-controller'],
@ -686,10 +709,11 @@ class NeutronVlanCephMongo(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_openstack_ctr:
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(
primary_controller.name),
path=post_openstack_ctr[0]['cmd'])
with self.fuel_web\
.get_ssh_for_node(primary_controller.name) as remote:
gd.run_check_from_task(
remote=remote,
path=post_openstack_ctr[0]['cmd'])
self.env.make_snapshot(
"step_9_run_openstack_primary_controller")
@ -745,10 +769,11 @@ class NeutronVlanCephMongo(TestBasic):
pre_net = self.get_pre_test(tasks, 'openstack-network')
if pre_net:
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(
primary_controller.name),
path=pre_net[0]['cmd'])
with self.fuel_web\
.get_ssh_for_node(primary_controller.name) as remote:
gd.run_check_from_task(
remote=remote,
path=pre_net[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=[task['id'] for task in tasks],
@ -770,10 +795,11 @@ class NeutronVlanCephMongo(TestBasic):
for task in expected_task_list:
if self.get_post_test(tasks, task):
gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(
primary_controller.name),
path=self.get_post_test(tasks, task)[0]['cmd'])
with self.fuel_web\
.get_ssh_for_node(primary_controller.name) as remote:
gd.run_check_from_task(
remote=remote,
path=self.get_post_test(tasks, task)[0]['cmd'])
try:
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['sanity'],
@ -834,10 +860,11 @@ class NeutronVlanCephMongo(TestBasic):
pre_cluster = self.get_pre_test(tasks, 'cluster')
post_cluster = self.get_post_test(tasks, 'cluster')
if pre_cluster:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_cluster[0]['cmd'])
for node in ['slave-02', 'slave-03']]
for node in ['slave-02', 'slave-03']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=pre_cluster[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['cluster'],
@ -845,10 +872,12 @@ class NeutronVlanCephMongo(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_cluster:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=post_cluster[0]['cmd'])
for node in ['slave-02', 'slave-03']]
for node in ['slave-02', 'slave-03']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=post_cluster[0]['cmd'])
self.env.make_snapshot("step_11_run_cluster_controller")
@test(depends_on=[step_11_run_cluster_controller],
@ -895,10 +924,11 @@ class NeutronVlanCephMongo(TestBasic):
pre_virtual_ips = self.get_pre_test(tasks, 'virtual_ips')
post_virtual_ips = self.get_post_test(tasks, 'virtual_ips')
if pre_virtual_ips:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_virtual_ips[0]['cmd'])
for node in ['slave-02', 'slave-03']]
for node in ['slave-02', 'slave-03']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=pre_virtual_ips[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['virtual_ips'],
@ -907,10 +937,11 @@ class NeutronVlanCephMongo(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_virtual_ips:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=post_virtual_ips[0]['cmd'])
for node in ['slave-02', 'slave-03']]
for node in ['slave-02', 'slave-03']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=post_virtual_ips[0]['cmd'])
self.env.make_snapshot("step_12_run_virtual_ips_controller")
@ -958,10 +989,11 @@ class NeutronVlanCephMongo(TestBasic):
post_cluster_haproxy = self.get_post_test(tasks, 'cluster-haproxy')
if pre_cluster_haproxy:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_cluster_haproxy[0]['cmd'])
for node in ['slave-02', 'slave-3']]
for node in ['slave-02', 'slave-3']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=pre_cluster_haproxy[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['cluster-haproxy'],
@ -971,10 +1003,11 @@ class NeutronVlanCephMongo(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_cluster_haproxy:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=post_cluster_haproxy[0]['cmd'])
for node in ['slave-02', 'slave-03']]
for node in ['slave-02', 'slave-03']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=post_cluster_haproxy[0]['cmd'])
self.env.make_snapshot(
"step_13_run_cluster_haproxy_controller")
@ -1024,10 +1057,11 @@ class NeutronVlanCephMongo(TestBasic):
pre_openstack_haproxy = self.get_pre_test(tasks, 'openstack-haproxy')
post_openstack_haproxy = self.get_post_test(tasks, 'openstack-haproxy')
if pre_openstack_haproxy:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_openstack_haproxy[0]['cmd'])
for node in ['slave-02', 'slave-03']]
for node in ['slave-02', 'slave-03']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=pre_openstack_haproxy[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['openstack-haproxy'],
@ -1057,17 +1091,20 @@ class NeutronVlanCephMongo(TestBasic):
logger.debug('res info is {0}'.format(res))
self.fuel_web.assert_task_success(task=res)
if post_openstack_haproxy:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=post_openstack_haproxy[0]['cmd'])
for node in ['slave-01']]
for node in ['slave-01']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=post_openstack_haproxy[0]['cmd'])
for service in ['memcached', 'openstack-cinder'
'rabbitmq', 'keystone', 'glance', 'database']:
if self.get_post_test(tasks, service):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(tasks, service)[0]['cmd'])
for node in ['slave-02', 'slave-03']]
for node in ['slave-02', 'slave-03']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=self.get_post_test(tasks, service)[0]['cmd'])
self.env.make_snapshot("step_14_run_openstack_haproxy_controller")
@ -1114,10 +1151,11 @@ class NeutronVlanCephMongo(TestBasic):
pre_openstack_ctr = self.get_pre_test(tasks, 'openstack-controller')
post_openstack_ctr = self.get_post_test(tasks, 'openstack-controller')
if pre_openstack_ctr:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_openstack_ctr[0]['cmd'])
for node in ['slave-02', 'slave-01']]
for node in ['slave-02', 'slave-01']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=pre_openstack_ctr[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['openstack-controller'],
@ -1126,10 +1164,11 @@ class NeutronVlanCephMongo(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_openstack_ctr:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=post_openstack_ctr[0]['cmd'])
for node in ['slave-02', 'slave-03']]
for node in ['slave-02', 'slave-03']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=post_openstack_ctr[0]['cmd'])
self.env.make_snapshot("step_15_run_openstack_controller")
@ -1183,10 +1222,11 @@ class NeutronVlanCephMongo(TestBasic):
pre_net = self.get_pre_test(tasks, 'openstack-network')
if pre_net:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_net[0]['cmd'])
for node in ['slave-02', 'slave-03']]
for node in ['slave-02', 'slave-03']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=pre_net[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=[task['id'] for task in tasks],
@ -1207,10 +1247,11 @@ class NeutronVlanCephMongo(TestBasic):
for task in expected_task_list:
if self.get_post_test(tasks, task):
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=self.get_post_test(tasks, task)[0]['cmd'])
for node in ['slave-02', 'slave-03']]
for node in ['slave-02', 'slave-03']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=self.get_post_test(tasks, task)[0]['cmd'])
try:
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['sanity'],
@ -1261,10 +1302,11 @@ class NeutronVlanCephMongo(TestBasic):
pre_top_compute = self.get_pre_test(tasks, 'top-role-compute')
post_top_compute = self.get_post_test(tasks, 'top-role-compute')
if pre_top_compute:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_top_compute[0]['cmd'])
for node in ['slave-04', 'slave-05']]
for node in ['slave-04', 'slave-05']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=pre_top_compute[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['top-role-compute'],
@ -1273,19 +1315,21 @@ class NeutronVlanCephMongo(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_top_compute:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=post_top_compute[0]['cmd'])
for node in ['slave-04', 'slave-05']]
for node in ['slave-04', 'slave-05']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=post_top_compute[0]['cmd'])
for service in ['openstack-network-compute', 'ceilometer-compute']:
pre_test = self.get_pre_test(tasks, service)
post_test = self.get_post_test(tasks, service)
if pre_test:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_test[0]['cmd'])
for node in ['slave-04', 'slave-05']]
for node in ['slave-04', 'slave-05']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=pre_test[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=[service],
@ -1295,10 +1339,11 @@ class NeutronVlanCephMongo(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_test:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=post_test[0]['cmd'])
for node in ['slave-04', 'slave-05']]
for node in ['slave-04', 'slave-05']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=post_test[0]['cmd'])
self.env.make_snapshot("step_17_run_top_role_compute")
@ -1340,10 +1385,11 @@ class NeutronVlanCephMongo(TestBasic):
pre_top_ceph = self.get_pre_test(tasks, 'top-role-ceph-osd')
post_top_ceph = self.get_post_test(tasks, 'top-role-ceph-osd')
if pre_top_ceph:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=pre_top_ceph[0]['cmd'])
for node in ['slave-04', 'slave-05']]
for node in ['slave-04', 'slave-05']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=pre_top_ceph[0]['cmd'])
res = self.fuel_web.client.put_deployment_tasks_for_cluster(
cluster_id, data=['top-role-ceph-osd'],
@ -1352,10 +1398,11 @@ class NeutronVlanCephMongo(TestBasic):
self.fuel_web.assert_task_success(task=res)
if post_top_ceph:
[gd.run_check_from_task(
remote=self.fuel_web.get_ssh_for_node(node),
path=post_top_ceph[0]['cmd'])
for node in ['slave-02', 'slave-03']]
for node in ['slave-02', 'slave-03']:
with self.fuel_web.get_ssh_for_node(node) as remote:
gd.run_check_from_task(
remote=remote,
path=post_top_ceph[0]['cmd'])
self.env.make_snapshot("step_18_run_top_role_ceph_osd")

View File

@ -49,14 +49,13 @@ class ContrailPlugin(TestBasic):
_pack_path = [CONTRAIL_PLUGIN_PACK_UB_PATH, CONTRAIL_PLUGIN_PACK_CEN_PATH]
def _upload_contrail_packages(self):
def _upload_contrail_packages(self, remote):
for pack in self._pack_path:
node_ssh = self.env.d_env.get_admin_remote()
if os.path.splitext(pack)[1] in [".deb", ".rpm"]:
pkg_name = os.path.basename(pack)
logger.debug("Uploading package {0} "
"to master node".format(pkg_name))
node_ssh.upload(pack, self._pack_copy_path)
remote.upload(pack, self._pack_copy_path)
else:
logger.error('Failed to upload file')
@ -86,21 +85,23 @@ class ContrailPlugin(TestBasic):
self.env.revert_snapshot("ready_with_%d_slaves" % slaves)
# copy plugin to the master node
checkers.upload_tarball(
self.env.d_env.get_admin_remote(),
CONTRAIL_PLUGIN_PATH, '/var')
with self.env.d_env.get_admin_remote() as remote:
# install plugin
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(CONTRAIL_PLUGIN_PATH))
# copy plugin to the master node
checkers.upload_tarball(
remote,
CONTRAIL_PLUGIN_PATH, '/var')
# copy additional packages to the master node
self._upload_contrail_packages()
# install plugin
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(CONTRAIL_PLUGIN_PATH))
# install packages
self._install_packages(self.env.d_env.get_admin_remote())
# copy additional packages to the master node
self._upload_contrail_packages(remote)
# install packages
self._install_packages(remote)
# prepare fuel
self._assign_net_provider(pub_net)

View File

@ -56,17 +56,16 @@ class TestElasticsearchPlugin(TestBasic):
"""
self.env.revert_snapshot("ready_with_3_slaves")
# copy plugin to the master node
with self.env.d_env.get_admin_remote() as remote:
# copy plugin to the master node
checkers.upload_tarball(
remote,
ELASTICSEARCH_KIBANA_PLUGIN_PATH, '/var')
checkers.upload_tarball(
self.env.d_env.get_admin_remote(),
ELASTICSEARCH_KIBANA_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(ELASTICSEARCH_KIBANA_PLUGIN_PATH))
# install plugin
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(ELASTICSEARCH_KIBANA_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,

View File

@ -104,17 +104,16 @@ class EMCPlugin(TestBasic):
"""
self.env.revert_snapshot("ready_with_5_slaves")
# copy plugin to the master node
with self.env.d_env.get_admin_remote() as remote:
# copy plugin to the master node
checkers.upload_tarball(
remote,
CONF.EMC_PLUGIN_PATH, '/var')
checkers.upload_tarball(
self.env.d_env.get_admin_remote(),
CONF.EMC_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(CONF.EMC_PLUGIN_PATH))
# install plugin
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(CONF.EMC_PLUGIN_PATH))
settings = None
@ -200,6 +199,10 @@ class EMCPlugin(TestBasic):
cinder_volume_comps = [self.check_service(compute, "cinder-volume")
for compute in compute_remotes]
# closing connections
[remote.clear() for remote in controller_remotes]
[remote.clear() for remote in compute_remotes]
asserts.assert_equal(sum(cinder_volume_comps), 0,
"Cluster has active cinder-volume on compute")

View File

@ -56,15 +56,17 @@ class ExamplePlugin(TestBasic):
# copy plugin to the master node
checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)
checkers.upload_tarball(
self.env.d_env.get_admin_remote(),
EXAMPLE_PLUGIN_PATH, '/var')
# install plugin
with self.env.d_env.get_admin_remote() as remote:
checkers.upload_tarball(
remote,
EXAMPLE_PLUGIN_PATH, '/var')
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))
# install plugin
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))
segment_type = NEUTRON_SEGMENT['vlan']
cluster_id = self.fuel_web.create_cluster(
@ -282,14 +284,16 @@ class ExamplePlugin(TestBasic):
# copy plugin to the master node
checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)
checkers.upload_tarball(
self.env.d_env.get_admin_remote(), EXAMPLE_PLUGIN_PATH, '/var')
# install plugin
with self.env.d_env.get_admin_remote() as remote:
checkers.upload_tarball(
remote, EXAMPLE_PLUGIN_PATH, '/var')
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))
# install plugin
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
@ -373,14 +377,16 @@ class ExamplePlugin(TestBasic):
# copy plugin to the master node
checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)
checkers.upload_tarball(
self.env.d_env.get_admin_remote(), EXAMPLE_PLUGIN_PATH, '/var')
# install plugin
with self.env.d_env.get_admin_remote() as remote:
checkers.upload_tarball(
remote, EXAMPLE_PLUGIN_PATH, '/var')
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))
# install plugin
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,

View File

@ -67,16 +67,15 @@ class GlusterfsPlugin(TestBasic):
"""
self.env.revert_snapshot("ready_with_3_slaves")
# copy plugin to the master node
with self.env.d_env.get_admin_remote() as remote:
# copy plugin to the master node
checkers.upload_tarball(
remote, GLUSTER_PLUGIN_PATH, '/var')
checkers.upload_tarball(
self.env.d_env.get_admin_remote(), GLUSTER_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(GLUSTER_PLUGIN_PATH))
# install plugin
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(GLUSTER_PLUGIN_PATH))
settings = None
@ -113,10 +112,11 @@ class GlusterfsPlugin(TestBasic):
for node in ('slave-01', 'slave-03'):
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
self.check_glusterfs_conf(
remote=self.env.d_env.get_ssh_to_remote(_ip),
path='/etc/cinder/glusterfs',
gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
self.check_glusterfs_conf(
remote=remote,
path='/etc/cinder/glusterfs',
gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)
self.fuel_web.verify_network(cluster_id)
@ -153,16 +153,16 @@ class GlusterfsPlugin(TestBasic):
"""
self.env.revert_snapshot("ready_with_5_slaves")
# copy plugin to the master node
with self.env.d_env.get_admin_remote() as remote:
# copy plugin to the master node
checkers.upload_tarball(
remote, GLUSTER_PLUGIN_PATH, '/var')
checkers.upload_tarball(
self.env.d_env.get_admin_remote(), GLUSTER_PLUGIN_PATH, '/var')
# install plugin
# install plugin
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(GLUSTER_PLUGIN_PATH))
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(GLUSTER_PLUGIN_PATH))
settings = None
@ -198,10 +198,11 @@ class GlusterfsPlugin(TestBasic):
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-03")['ip']
self.check_glusterfs_conf(
remote=self.env.d_env.get_ssh_to_remote(_ip),
path='/etc/cinder/glusterfs',
gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
self.check_glusterfs_conf(
remote=remote,
path='/etc/cinder/glusterfs',
gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)
self.fuel_web.verify_network(cluster_id)
@ -220,10 +221,11 @@ class GlusterfsPlugin(TestBasic):
for node in ('slave-03', 'slave-04', 'slave-05'):
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
self.check_glusterfs_conf(
remote=self.env.d_env.get_ssh_to_remote(_ip),
path='/etc/cinder/glusterfs',
gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
self.check_glusterfs_conf(
remote=remote,
path='/etc/cinder/glusterfs',
gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)
self.fuel_web.run_ostf(
cluster_id=cluster_id)

View File

@ -113,16 +113,16 @@ class LbaasPlugin(TestBasic):
"""
self.env.revert_snapshot("ready_with_3_slaves")
# copy plugin to the master node
with self.env.d_env.get_admin_remote() as remote:
# copy plugin to the master node
checkers.upload_tarball(
remote, LBAAS_PLUGIN_PATH, '/var')
checkers.upload_tarball(
self.env.d_env.get_admin_remote(), LBAAS_PLUGIN_PATH, '/var')
# install plugin
# install plugin
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(LBAAS_PLUGIN_PATH))
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(LBAAS_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
@ -198,16 +198,15 @@ class LbaasPlugin(TestBasic):
"""
self.env.revert_snapshot("ready_with_3_slaves")
# copy plugin to the master node
with self.env.d_env.get_admin_remote() as remote:
# copy plugin to the master node
checkers.upload_tarball(
remote, LBAAS_PLUGIN_PATH, '/var')
checkers.upload_tarball(
self.env.d_env.get_admin_remote(), LBAAS_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(LBAAS_PLUGIN_PATH))
# install plugin
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(LBAAS_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,

View File

@ -58,29 +58,28 @@ class TestLmaCollectorPlugin(TestBasic):
"""
self.env.revert_snapshot("ready_with_5_slaves")
# copy plugins to the master node
with self.env.d_env.get_admin_remote() as remote:
# copy plugins to the master node
checkers.upload_tarball(
remote,
conf.LMA_COLLECTOR_PLUGIN_PATH, "/var")
checkers.upload_tarball(
remote,
conf.ELASTICSEARCH_KIBANA_PLUGIN_PATH, "/var")
checkers.upload_tarball(
remote,
conf.INFLUXDB_GRAFANA_PLUGIN_PATH, "/var")
checkers.upload_tarball(
self.env.d_env.get_admin_remote(),
conf.LMA_COLLECTOR_PLUGIN_PATH, "/var")
checkers.upload_tarball(
self.env.d_env.get_admin_remote(),
conf.ELASTICSEARCH_KIBANA_PLUGIN_PATH, "/var")
checkers.upload_tarball(
self.env.d_env.get_admin_remote(),
conf.INFLUXDB_GRAFANA_PLUGIN_PATH, "/var")
# install plugins
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(conf.LMA_COLLECTOR_PLUGIN_PATH))
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(conf.ELASTICSEARCH_KIBANA_PLUGIN_PATH))
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(conf.INFLUXDB_GRAFANA_PLUGIN_PATH))
# install plugins
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(conf.LMA_COLLECTOR_PLUGIN_PATH))
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(conf.ELASTICSEARCH_KIBANA_PLUGIN_PATH))
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(conf.INFLUXDB_GRAFANA_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,

View File

@ -165,11 +165,13 @@ class ZabbixPlugin(TestBasic):
"""
self.env.revert_snapshot("ready_with_5_slaves")
checkers.upload_tarball(
self.env.d_env.get_admin_remote(), conf.ZABBIX_PLUGIN_PATH, "/var")
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(conf.ZABBIX_PLUGIN_PATH))
with self.env.d_env.get_admin_remote() as remote:
checkers.upload_tarball(
remote, conf.ZABBIX_PLUGIN_PATH, "/var")
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(conf.ZABBIX_PLUGIN_PATH))
settings = None
if conf.NEUTRON_ENABLE:
@ -251,13 +253,14 @@ class ZabbixPlugin(TestBasic):
"""
self.env.revert_snapshot("ready_with_5_slaves")
for plugin in [conf.ZABBIX_PLUGIN_PATH,
conf.ZABBIX_SNMP_PLUGIN_PATH]:
checkers.upload_tarball(
self.env.d_env.get_admin_remote(), plugin, "/var")
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(plugin))
with self.env.d_env.get_admin_remote() as remote:
for plugin in [conf.ZABBIX_PLUGIN_PATH,
conf.ZABBIX_SNMP_PLUGIN_PATH]:
checkers.upload_tarball(
remote, plugin, "/var")
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(plugin))
settings = None
@ -373,14 +376,15 @@ class ZabbixPlugin(TestBasic):
"""
self.env.revert_snapshot("ready_with_5_slaves")
for plugin in [conf.ZABBIX_PLUGIN_PATH,
conf.ZABBIX_SNMP_PLUGIN_PATH,
conf.ZABBIX_SNMP_EMC_PLUGIN_PATH]:
checkers.upload_tarball(
self.env.d_env.get_admin_remote(), plugin, "/var")
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(plugin))
with self.env.d_env.get_admin_remote() as remote:
for plugin in [conf.ZABBIX_PLUGIN_PATH,
conf.ZABBIX_SNMP_PLUGIN_PATH,
conf.ZABBIX_SNMP_EMC_PLUGIN_PATH]:
checkers.upload_tarball(
remote, plugin, "/var")
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(plugin))
settings = None
@ -463,14 +467,15 @@ class ZabbixPlugin(TestBasic):
"""
self.env.revert_snapshot("ready_with_5_slaves")
for plugin in [conf.ZABBIX_PLUGIN_PATH,
conf.ZABBIX_SNMP_PLUGIN_PATH,
conf.ZABBIX_SNMP_EXTREME_PLUGIN_PATH]:
checkers.upload_tarball(
self.env.d_env.get_admin_remote(), plugin, "/var")
checkers.install_plugin_check_code(
self.env.d_env.get_admin_remote(),
plugin=os.path.basename(plugin))
with self.env.d_env.get_admin_remote() as remote:
for plugin in [conf.ZABBIX_PLUGIN_PATH,
conf.ZABBIX_SNMP_PLUGIN_PATH,
conf.ZABBIX_SNMP_EXTREME_PLUGIN_PATH]:
checkers.upload_tarball(
remote, plugin, "/var")
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(plugin))
settings = None

View File

@ -80,8 +80,8 @@ class TestAdminNode(TestBasic):
"""
self.env.revert_snapshot("empty")
ps_output = self.env.d_env.get_admin_remote().execute(
'ps ax')['stdout']
with self.env.d_env.get_admin_remote() as remote:
ps_output = remote.execute('ps ax')['stdout']
astute_master = filter(lambda x: 'astute master' in x, ps_output)
logger.info("Found astute processes: %s" % astute_master)
assert_equal(len(astute_master), 1)
@ -521,14 +521,14 @@ class FuelMasterMigrate(TestBasic):
wait(lambda: icmp_ping(self.env.get_admin_node_ip()),
timeout=60 * 15, timeout_msg='Master node has not become online '
'after rebooting')
wait(lambda: self.env.d_env.get_admin_remote(), timeout=60 * 15,
timeout_msg='Master node has not become online after rebooting')
checkers.wait_phrase_in_log(self.env.d_env.get_admin_remote(),
60 * 90, interval=0.1,
phrase='Stop network and up with '
'new settings',
log_path='/var/log/fuel-migrate.log')
self.env.d_env.nodes().admin.await(network_name=self.d_env.admin_net,
timeout=60 * 15)
with self.env.d_env.get_admin_remote() as remote:
checkers.wait_phrase_in_log(remote,
60 * 90, interval=0.1,
phrase='Stop network and up with '
'new settings',
log_path='/var/log/fuel-migrate.log')
logger.info('Shutting down network')
wait(lambda: not icmp_ping(self.env.get_admin_node_ip()),
@ -538,7 +538,8 @@ class FuelMasterMigrate(TestBasic):
timeout=60 * 15,
timeout_msg='Master node has not become online shutting network')
wait(lambda: self.env.d_env.get_admin_remote(), timeout=60 * 10)
self.env.d_env.nodes().admin.await(network_name=self.d_env.admin_net,
timeout=60 * 10)
logger.info("Check containers")
self.env.docker_actions.wait_for_ready_containers(timeout=60 * 30)

View File

@ -120,8 +120,8 @@ class CephCompact(TestBasic):
data=image_data)
volume = os_conn.create_volume(size=1, image_id=image.id)
remote = self.fuel_web.get_ssh_for_node('slave-01')
rbd_list = ceph.get_rbd_images_list(remote, 'volumes')
with self.fuel_web.get_ssh_for_node('slave-01') as remote:
rbd_list = ceph.get_rbd_images_list(remote, 'volumes')
for item in rbd_list:
if volume.id in item['image']:
@ -342,17 +342,18 @@ class CephHA(TestBasic):
# create image
devops_node = self.fuel_web.get_nailgun_primary_node(
self.env.d_env.nodes().slaves[0])
slave = self.fuel_web.get_ssh_for_node(devops_node.name)
if settings.OPENSTACK_RELEASE_CENTOS in settings.OPENSTACK_RELEASE:
slave.execute(". openrc; glance image-create --name"
" 'custom-image' --disk-format qcow2"
" --container-format bare"
" --file /opt/vm/cirros-x86_64-disk.img")
else:
slave.execute(". openrc; glance image-create --name"
" 'custom-image' --disk-format qcow2"
" --container-format bare --file"
" /usr/share/cirros-testvm/cirros-x86_64-disk.img")
with self.fuel_web.get_ssh_for_node(devops_node.name) as slave:
if settings.OPENSTACK_RELEASE_CENTOS in settings.OPENSTACK_RELEASE:
slave.execute(". openrc; glance image-create --name"
" 'custom-image' --disk-format qcow2"
" --container-format bare"
" --file /opt/vm/cirros-x86_64-disk.img")
else:
slave.execute(
". openrc; glance image-create --name"
" 'custom-image' --disk-format qcow2"
" --container-format bare --file"
" /usr/share/cirros-testvm/cirros-x86_64-disk.img")
image = os_conn.get_image_by_name('custom-image')
@ -478,10 +479,10 @@ class CephRadosGW(TestBasic):
logger.info("Check all HAProxy backends on {}".format(
node['meta']['system']['fqdn']))
haproxy_status = checkers.check_haproxy_backend(remote)
remote.clear()
assert_equal(haproxy_status['exit_code'], 1,
"HAProxy backends are DOWN. {0}".format(
haproxy_status))
remote.clear()
self.fuel_web.check_ceph_status(cluster_id)
@ -490,12 +491,11 @@ class CephRadosGW(TestBasic):
test_sets=['ha', 'smoke', 'sanity'])
# Check the radosqw daemon is started
remote = self.fuel_web.get_ssh_for_node('slave-01')
radosgw_started = lambda: len(remote.check_call(
'ps aux | grep "/usr/bin/radosgw -n '
'client.radosgw.gateway"')['stdout']) == 3
assert_true(radosgw_started(), 'radosgw daemon started')
remote.clear()
with self.fuel_web.get_ssh_for_node('slave-01') as remote:
radosgw_started = lambda: len(remote.check_call(
'ps aux | grep "/usr/bin/radosgw -n '
'client.radosgw.gateway"')['stdout']) == 3
assert_true(radosgw_started(), 'radosgw daemon started')
self.env.make_snapshot("ceph_rados_gw")
@ -829,9 +829,10 @@ class CheckCephPartitionsAfterReboot(TestBasic):
self.show_step(7, node)
logger.info("Get partitions for {node}".format(node=node))
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
before_reboot_partitions = [checkers.get_ceph_partitions(
self.env.d_env.get_ssh_to_remote(_ip),
"/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
before_reboot_partitions = [checkers.get_ceph_partitions(
remote,
"/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
self.show_step(8, node)
logger.info("Warm-restart nodes")
@ -843,9 +844,10 @@ class CheckCephPartitionsAfterReboot(TestBasic):
node=node
))
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
after_reboot_partitions = [checkers.get_ceph_partitions(
self.env.d_env.get_ssh_to_remote(_ip),
"/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
after_reboot_partitions = [checkers.get_ceph_partitions(
remote,
"/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
if before_reboot_partitions != after_reboot_partitions:
logger.info("Partitions don`t match")
@ -864,9 +866,10 @@ class CheckCephPartitionsAfterReboot(TestBasic):
self.show_step(12, node)
_ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
after_reboot_partitions = [checkers.get_ceph_partitions(
self.env.d_env.get_ssh_to_remote(_ip),
"/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
after_reboot_partitions = [checkers.get_ceph_partitions(
remote,
"/dev/vd{p}".format(p=part)) for part in ["b", "c"]]
if before_reboot_partitions != after_reboot_partitions:
logger.info("Partitions don`t match")

View File

@ -69,23 +69,23 @@ class CommandLineMinimal(TestBasic):
cluster_id,
{'slave-01': ['controller']}
)
remote = self.env.d_env.get_admin_remote()
node_id = self.fuel_web.get_nailgun_node_by_devops_node(
self.env.d_env.nodes().slaves[0])['id']
remote.execute('fuel node --node {0} --provision --env {1}'.format
(node_id, cluster_id))
self.fuel_web.provisioning_cluster_wait(cluster_id)
remote.execute('fuel node --node {0} --end hiera --env {1}'.format
(node_id, cluster_id))
try:
wait(lambda: int(
remote.execute(
'fuel task | grep deployment | awk \'{print $9}\'')
['stdout'][0].rstrip()) == 100, timeout=120)
except TimeoutError:
raise TimeoutError("hiera manifest was not applyed")
role = remote.execute('ssh -q node-{0} "hiera role"'.format
(node_id))['stdout'][0].rstrip()
with self.env.d_env.get_admin_remote() as remote:
node_id = self.fuel_web.get_nailgun_node_by_devops_node(
self.env.d_env.nodes().slaves[0])['id']
remote.execute('fuel node --node {0} --provision --env {1}'.format
(node_id, cluster_id))
self.fuel_web.provisioning_cluster_wait(cluster_id)
remote.execute('fuel node --node {0} --end hiera --env {1}'.format
(node_id, cluster_id))
try:
wait(lambda: int(
remote.execute(
'fuel task | grep deployment | awk \'{print $9}\'')
['stdout'][0].rstrip()) == 100, timeout=120)
except TimeoutError:
raise TimeoutError("hiera manifest was not applyed")
role = remote.execute('ssh -q node-{0} "hiera role"'
.format(node_id))['stdout'][0].rstrip()
assert_equal(role, 'primary-controller', "node with deployed hiera "
"was not found")
@ -303,12 +303,12 @@ class CommandLine(TestBasic):
"""
self.env.revert_snapshot("cli_selected_nodes_deploy")
remote = self.env.d_env.get_admin_remote()
node_id = self.fuel_web.get_nailgun_node_by_devops_node(
self.env.d_env.nodes().slaves[2])['id']
with self.env.d_env.get_admin_remote() as remote:
node_id = self.fuel_web.get_nailgun_node_by_devops_node(
self.env.d_env.nodes().slaves[2])['id']
assert_true(check_cobbler_node_exists(remote, node_id),
"node-{0} is not found".format(node_id))
assert_true(check_cobbler_node_exists(remote, node_id),
"node-{0} is not found".format(node_id))
self.env.d_env.nodes().slaves[2].destroy()
try:
wait(
@ -317,25 +317,35 @@ class CommandLine(TestBasic):
slaves[2])['online'], timeout=60 * 6)
except TimeoutError:
raise
with self.env.d_env.get_admin_remote() as remote:
res = remote.execute('fuel node --node-id {0} --delete-from-db'
.format(node_id))
assert_true(
remote.execute('fuel node --node-id {0} --delete-from-db'.
format(node_id))['exit_code'] == 0,
"Offline node-{0} was not deleted from database".format(node_id)
)
try:
wait(
lambda: not remote.execute(
"fuel node | awk '{{print $1}}' | grep -w '{0}'".
format(node_id))['exit_code'] == 0, timeout=60 * 2)
except TimeoutError:
raise TimeoutError(
"After deletion node-{0} is found in fuel list".
format(node_id))
assert_false(check_cobbler_node_exists(remote, node_id),
res['exit_code'] == 0,
"Offline node-{0} was not"
"deleted from database".format(node_id))
with self.env.d_env.get_admin_remote() as remote:
try:
wait(
lambda: not remote.execute(
"fuel node | awk '{{print $1}}' | grep -w '{0}'".
format(node_id))['exit_code'] == 0, timeout=60 * 2)
except TimeoutError:
raise TimeoutError(
"After deletion node-{0} is found in fuel list".
format(node_id))
with self.env.d_env.get_admin_remote() as remote:
is_cobler_node_exists = check_cobbler_node_exists(remote, node_id)
assert_false(is_cobler_node_exists,
"After deletion node-{0} is found in cobbler list".
format(node_id))
cluster_id = ''.join(remote.execute(
"fuel env | tail -n 1 | awk {'print $1'}")['stdout']).rstrip()
with self.env.d_env.get_admin_remote() as remote:
cluster_id = ''.join(remote.execute(
"fuel env | tail -n 1 | awk {'print $1'}")['stdout']).rstrip()
self.fuel_web.verify_network(cluster_id)
@ -359,19 +369,23 @@ class CommandLine(TestBasic):
"""
self.env.revert_snapshot("cli_selected_nodes_deploy")
remote = self.env.d_env.get_admin_remote()
cluster_id = self.fuel_web.get_last_created_cluster()
with self.env.d_env.get_admin_remote() as remote:
res = remote.execute('fuel --env {0} env delete'
.format(cluster_id))
assert_true(
remote.execute('fuel --env {0} env delete'.format(cluster_id))
['exit_code'] == 0)
try:
wait(lambda:
remote.execute(
"fuel env | awk '{print $1}' | tail -n 1 | grep '^.$'")
['exit_code'] == 1, timeout=60 * 6)
except TimeoutError:
raise TimeoutError(
"cluster {0} was not deleted".format(cluster_id))
res['exit_code'] == 0)
with self.env.d_env.get_admin_remote() as remote:
try:
wait(lambda:
remote.execute("fuel env | awk '{print $1}'"
" | tail -n 1 | grep '^.$'")
['exit_code'] == 1, timeout=60 * 6)
except TimeoutError:
raise TimeoutError(
"cluster {0} was not deleted".format(cluster_id))
assert_false(
check_cluster_presence(cluster_id, self.env.postgres_actions),
"cluster {0} is found".format(cluster_id))

View File

@ -95,6 +95,8 @@ class CustomHostname(TestBasic):
"{0} node is not accessible by its default "
"hostname {1}".format(devops_node.name, node['hostname']))
admin_remote.clear()
self.env.make_snapshot("default_hostname")
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
@ -120,8 +122,6 @@ class CustomHostname(TestBasic):
for method in ('API', 'CLI'):
self.env.revert_snapshot("ready_with_5_slaves")
admin_remote = self.env.d_env.get_admin_remote()
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
@ -150,9 +150,10 @@ class CustomHostname(TestBasic):
self.fuel_web.client.set_hostname(node['id'],
custom_hostname)
elif method == 'CLI':
admin_remote.execute(
'fuel node --node-id {0} --hostname '
'{1}'.format(node['id'], custom_hostname))
with self.env.d_env.get_admin_remote() as admin_remote:
admin_remote.execute(
'fuel node --node-id {0} --hostname '
'{1}'.format(node['id'], custom_hostname))
self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
@ -169,9 +170,10 @@ class CustomHostname(TestBasic):
custom_hostnames):
devops_node = self.fuel_web.get_devops_node_by_nailgun_node(
node)
hostname = admin_remote.execute(
"ssh -q {0} hostname "
"-s".format(custom_hostname))['stdout'][0].strip()
with self.env.d_env.get_admin_remote() as admin_remote:
hostname = admin_remote.execute(
"ssh -q {0} hostname "
"-s".format(custom_hostname))['stdout'][0].strip()
assert_equal(
custom_hostname,
hostname,

View File

@ -291,9 +291,8 @@ class HAOneControllerNeutron(HAOneControllerNeutronBase):
self.fuel_web.run_ostf(cluster_id=cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-03")['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
result = remote.execute('readlink /etc/astute.yaml')['stdout']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
result = remote.execute('readlink /etc/astute.yaml')['stdout']
assert_true("base-os" in result[0],
"Role mismatch. Node slave-03 is not base-os")

View File

@ -137,17 +137,18 @@ class NeutronGreHa(TestBasic):
self.env.d_env.nodes().slaves[0])
logger.debug("devops node name is {0}".format(devops_node.name))
_ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
for i in range(5):
try:
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
for i in range(5):
try:
checkers.check_swift_ring(remote)
break
except AssertionError:
result = remote.execute(
"/usr/local/bin/swift-rings-rebalance.sh")
logger.debug("command execution result is {0}"
.format(result))
else:
checkers.check_swift_ring(remote)
break
except AssertionError:
result = remote.execute(
"/usr/local/bin/swift-rings-rebalance.sh")
logger.debug("command execution result is {0}".format(result))
else:
checkers.check_swift_ring(remote)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
@ -215,17 +216,18 @@ class NeutronVlanHa(TestBasic):
self.env.d_env.nodes().slaves[0])
logger.debug("devops node name is {0}".format(devops_node.name))
_ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
for i in range(5):
try:
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
for i in range(5):
try:
checkers.check_swift_ring(remote)
break
except AssertionError:
result = remote.execute(
"/usr/local/bin/swift-rings-rebalance.sh")
logger.debug("command execution result is {0}"
.format(result))
else:
checkers.check_swift_ring(remote)
break
except AssertionError:
result = remote.execute(
"/usr/local/bin/swift-rings-rebalance.sh")
logger.debug("command execution result is {0}".format(result))
else:
checkers.check_swift_ring(remote)
self.fuel_web.run_ostf(
cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])

View File

@ -90,12 +90,13 @@ class SaharaHAOneController(TestBasic):
LOGGER.debug('Verify Sahara service on controller')
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.d_env.get_ssh_to_remote(_ip),
service_name='sahara-api')
checkers.verify_service(
self.env.d_env.get_ssh_to_remote(_ip),
service_name='sahara-engine')
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='sahara-api')
checkers.verify_service(
remote,
service_name='sahara-engine')
LOGGER.debug('Run all sanity and smoke tests')
path_to_tests = 'fuel_health.tests.sanity.test_sanity_sahara.'
@ -198,12 +199,13 @@ class SaharaHA(TestBasic):
LOGGER.debug('Verify Sahara service on all controllers')
for slave in ["slave-01", "slave-02", "slave-03"]:
_ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
checkers.verify_service(
self.env.d_env.get_ssh_to_remote(_ip),
service_name='sahara-api')
checkers.verify_service(
self.env.d_env.get_ssh_to_remote(_ip),
service_name='sahara-engine')
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='sahara-api')
checkers.verify_service(
remote,
service_name='sahara-engine')
LOGGER.debug('Run all sanity and smoke tests')
path_to_tests = 'fuel_health.tests.sanity.test_sanity_sahara.'
@ -292,9 +294,10 @@ class MuranoHAOneController(TestBasic):
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.d_env.get_ssh_to_remote(_ip),
service_name='murano-api')
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='murano-api')
LOGGER.debug('Run sanity and functional Murano OSTF tests')
self.fuel_web.run_single_ostf_test(
@ -380,9 +383,10 @@ class MuranoHA(TestBasic):
self.fuel_web.assert_cluster_ready(os_conn, smiles_count=13)
for slave in ["slave-01", "slave-02", "slave-03"]:
_ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
checkers.verify_service(
self.env.d_env.get_ssh_to_remote(_ip),
service_name='murano-api')
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='murano-api')
LOGGER.debug('Run sanity and functional Murano OSTF tests')
self.fuel_web.run_single_ostf_test(
@ -543,13 +547,15 @@ class CeilometerHAOneControllerMongo(OSTFCeilometerHelper):
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.d_env.get_ssh_to_remote(_ip),
service_name='ceilometer-api')
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='ceilometer-api')
_ip = self.fuel_web.get_nailgun_node_by_name("slave-03")['ip']
partitions = checkers.get_mongo_partitions(
self.env.d_env.get_ssh_to_remote(_ip), "vda5")
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
partitions = checkers.get_mongo_partitions(remote, "vda5")
assert_equal(partitions[0].rstrip(), mongo_disk_gb,
'Mongo size {0} before deployment is not equal'
' to size after {1}'.format(mongo_disk_gb, partitions))
@ -598,9 +604,10 @@ class CeilometerHAOneControllerMongo(OSTFCeilometerHelper):
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.d_env.get_ssh_to_remote(_ip),
service_name='ceilometer-api')
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='ceilometer-api')
self.run_tests(cluster_id)
self.env.make_snapshot("deploy_ceilometer_ha_one_controller_multirole")
@ -655,9 +662,10 @@ class CeilometerHAMongo(OSTFCeilometerHelper):
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.d_env.get_ssh_to_remote(_ip),
service_name='ceilometer-api')
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='ceilometer-api')
self.run_tests(cluster_id,
skip_tests=['test_check_volume_notifications'])
@ -706,9 +714,10 @@ class CeilometerHAMongo(OSTFCeilometerHelper):
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.d_env.get_ssh_to_remote(_ip),
service_name='ceilometer-api')
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='ceilometer-api')
self.run_tests(cluster_id)
self.env.make_snapshot("deploy_ceilometer_ha_multirole", is_make=True)
@ -800,9 +809,10 @@ class CeilometerHAMongo(OSTFCeilometerHelper):
self.fuel_web.deploy_cluster_wait(cluster_id)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.d_env.get_ssh_to_remote(_ip),
service_name='ceilometer-api')
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='ceilometer-api')
self.run_tests(cluster_id)
self.env.make_snapshot("deploy_ceilometer_ha_with_external_mongo")
@ -863,14 +873,16 @@ class HeatHAOneController(TestBasic):
self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.d_env.get_ssh_to_remote(_ip),
service_name='heat-api', count=3)
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='heat-api', count=3)
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
checkers.verify_service(
self.env.d_env.get_ssh_to_remote(_ip),
service_name='ceilometer-api')
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='ceilometer-api')
LOGGER.debug('Run Heat OSTF platform tests')
@ -955,14 +967,13 @@ class HeatHA(TestBasic):
for slave in ["slave-01", "slave-02", "slave-03"]:
_ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
checkers.verify_service(
self.env.d_env.get_ssh_to_remote(_ip),
service_name='heat-api', count=3)
_ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
checkers.verify_service(
self.env.d_env.get_ssh_to_remote(_ip),
service_name='ceilometer-api')
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
remote,
service_name='heat-api', count=3)
checkers.verify_service(
remote,
service_name='ceilometer-api')
LOGGER.debug('Run Heat OSTF platform tests')

View File

@ -130,10 +130,10 @@ class PatchingTests(TestBasic):
for repo in patching_repos:
patching.connect_slaves_to_repo(self.env, slaves, repo)
if settings.PATCHING_MASTER_MIRRORS:
for repo in patching_master_repos:
remote = self.env.d_env.get_admin_remote()
install_pkg(remote, 'yum-utils')
patching.connect_admin_to_repo(self.env, repo)
with self.env.d_env.get_admin_remote() as remote:
for repo in patching_master_repos:
install_pkg(remote, 'yum-utils')
patching.connect_admin_to_repo(self.env, repo)
# Step #5
if settings.LATE_ARTIFACTS_JOB_URL:
@ -219,9 +219,10 @@ class PatchingTests(TestBasic):
test_sets=['smoke', 'ha'])
if "ceph-osd" in roles:
remote_ceph = self.fuel_web.get_ssh_for_node(
'slave-0{}'.format(number_of_nodes + 1))
self.fuel_web.prepare_ceph_to_delete(remote_ceph)
with self.fuel_web.get_ssh_for_node(
'slave-0{}'.format(number_of_nodes + 1)
) as remote_ceph:
self.fuel_web.prepare_ceph_to_delete(remote_ceph)
nailgun_node = self.fuel_web.update_nodes(
cluster_id, node, False, True)
@ -302,8 +303,8 @@ class PatchingMasterTests(TestBasic):
'" failed.'.format(self.snapshot_name))
# Step #1
remote = self.env.d_env.get_admin_remote()
install_pkg(remote, 'yum-utils')
with self.env.d_env.get_admin_remote() as remote:
install_pkg(remote, 'yum-utils')
patching_repos = patching.add_remote_repositories(
self.env, settings.PATCHING_MASTER_MIRRORS)
@ -377,9 +378,10 @@ class PatchingMasterTests(TestBasic):
test_sets=['smoke', 'ha'])
if "ceph-osd" in roles:
remote_ceph = self.fuel_web.get_ssh_for_node(
'slave-0{}'.format(number_of_nodes + 1))
self.fuel_web.prepare_ceph_to_delete(remote_ceph)
with self.fuel_web.get_ssh_for_node(
'slave-0{}'.format(number_of_nodes + 1)
) as remote:
self.fuel_web.prepare_ceph_to_delete(remote)
nailgun_node = self.fuel_web.update_nodes(
cluster_id, node, False, True)
nodes = filter(

View File

@ -298,17 +298,18 @@ class TestHaFailoverBase(TestBasic):
self.env.revert_snapshot(self.snapshot_name)
for devops_node in self.env.d_env.nodes().slaves[:3]:
remote = self.fuel_web.get_ssh_for_node(devops_node.name)
logger.info('Terminating MySQL on {0}'.format(devops_node.name))
with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
logger.info('Terminating MySQL on {0}'
.format(devops_node.name))
try:
remote.check_call('pkill -9 -x "mysqld"')
except:
logger.error('MySQL on {0} is down after snapshot revert'.
format(devops_node.name))
raise
try:
remote.check_call('pkill -9 -x "mysqld"')
except:
logger.error('MySQL on {0} is down after snapshot revert'.
format(devops_node.name))
raise
check_mysql(remote, devops_node.name)
check_mysql(remote, devops_node.name)
cluster_id = self.fuel_web.client.get_cluster_id(
self.__class__.__name__)
@ -327,16 +328,16 @@ class TestHaFailoverBase(TestBasic):
self.env.revert_snapshot(self.snapshot_name)
for devops_node in self.env.d_env.nodes().slaves[:3]:
remote = self.fuel_web.get_ssh_for_node(devops_node.name)
remote.check_call('kill -9 $(pidof haproxy)')
with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
remote.check_call('kill -9 $(pidof haproxy)')
def haproxy_started():
ret = remote.execute(
'[ -f /var/run/haproxy.pid ] && '
'[ "$(ps -p $(cat /var/run/haproxy.pid) -o pid=)" == '
'"$(pidof haproxy)" ]'
)
return ret['exit_code'] == 0
def haproxy_started():
ret = remote.execute(
'[ -f /var/run/haproxy.pid ] && '
'[ "$(ps -p $(cat /var/run/haproxy.pid) -o pid=)" == '
'"$(pidof haproxy)" ]'
)
return ret['exit_code'] == 0
wait(haproxy_started, timeout=20)
assert_true(haproxy_started(), 'haproxy restarted')
@ -413,51 +414,62 @@ class TestHaFailoverBase(TestBasic):
" AMQP server: AMQP connection test returned 1"
heat_name = 'heat-engine'
ocf_status = \
'script -q -c "OCF_ROOT=/usr/lib/ocf' \
' /usr/lib/ocf/resource.d/fuel/{0}' \
' monitor 2>&1"'.format(heat_name)
remote = self.fuel_web.get_ssh_for_node(
self.env.d_env.nodes().slaves[0].name)
pid = ''.join(remote.execute('pgrep heat-engine')['stdout'])
get_ocf_status = ''.join(
remote.execute(ocf_status)['stdout']).rstrip()
node_name = self.env.d_env.nodes().slaves[0].name
with self.fuel_web.get_ssh_for_node(node_name) as remote:
pid = ''.join(remote.execute('pgrep {0}'
.format(heat_name))['stdout'])
get_ocf_status = ''.join(
remote.execute(ocf_status)['stdout']).rstrip()
assert_true(ocf_success in get_ocf_status,
"heat engine is not succeeded, status is {0}".format(
get_ocf_status))
assert_true(len(remote.execute(
"netstat -nap | grep {0} | grep :5673".
format(pid))['stdout']) > 0, 'There is no amqp connections')
remote.execute("iptables -I OUTPUT 1 -m owner --uid-owner heat -m"
" state --state NEW,ESTABLISHED,RELATED -j DROP")
cmd = "netstat -nap | grep {0} | grep :5673".format(pid)
wait(lambda: len(remote.execute(cmd)['stdout']) == 0, timeout=300)
with self.fuel_web.get_ssh_for_node(node_name) as remote:
amqp_con = len(remote.execute(
"netstat -nap | grep {0} | grep :5673".
format(pid))['stdout'])
assert_true(amqp_con > 0, 'There is no amqp connections')
get_ocf_status = ''.join(
remote.execute(ocf_status)['stdout']).rstrip()
with self.fuel_web.get_ssh_for_node(node_name) as remote:
remote.execute("iptables -I OUTPUT 1 -m owner --uid-owner heat -m"
" state --state NEW,ESTABLISHED,RELATED -j DROP")
cmd = "netstat -nap | grep {0} | grep :5673".format(pid)
wait(lambda: len(remote.execute(cmd)['stdout']) == 0, timeout=300)
get_ocf_status = ''.join(
remote.execute(ocf_status)['stdout']).rstrip()
logger.info('ocf status after blocking is {0}'.format(
get_ocf_status))
assert_true(ocf_error in get_ocf_status,
"heat engine is running, status is {0}".format(
get_ocf_status))
remote.execute("iptables -D OUTPUT 1 -m owner --uid-owner heat -m"
" state --state NEW,ESTABLISHED,RELATED")
_wait(lambda: assert_true(ocf_success in ''.join(
remote.execute(ocf_status)['stdout']).rstrip()), timeout=240)
newpid = ''.join(remote.execute('pgrep heat-engine')['stdout'])
assert_true(pid != newpid, "heat pid is still the same")
get_ocf_status = ''.join(remote.execute(
ocf_status)['stdout']).rstrip()
with self.fuel_web.get_ssh_for_node(node_name) as remote:
remote.execute("iptables -D OUTPUT 1 -m owner --uid-owner heat -m"
" state --state NEW,ESTABLISHED,RELATED")
_wait(lambda: assert_true(ocf_success in ''.join(
remote.execute(ocf_status)['stdout']).rstrip()), timeout=240)
newpid = ''.join(remote.execute('pgrep {0}'
.format(heat_name))['stdout'])
assert_true(pid != newpid, "heat pid is still the same")
get_ocf_status = ''.join(remote.execute(
ocf_status)['stdout']).rstrip()
assert_true(ocf_success in get_ocf_status,
"heat engine is not succeeded, status is {0}".format(
get_ocf_status))
assert_true(len(
remote.execute("netstat -nap | grep {0} | grep :5673".format(
newpid))['stdout']) > 0)
with self.fuel_web.get_ssh_for_node(node_name) as remote:
heat = len(
remote.execute("netstat -nap | grep {0} | grep :5673"
.format(newpid))['stdout'])
assert_true(heat > 0)
cluster_id = self.fuel_web.get_last_created_cluster()
self.fuel_web.run_ostf(cluster_id=cluster_id)
@ -467,17 +479,17 @@ class TestHaFailoverBase(TestBasic):
self.env.revert_snapshot(self.snapshot_name)
for devops_node in self.env.d_env.nodes().slaves[3:5]:
remote = self.fuel_web.get_ssh_for_node(devops_node.name)
remote.execute("kill -9 `pgrep nova-compute`")
wait(
lambda: len(remote.execute('pgrep nova-compute')['stdout'])
== 1, timeout=120)
assert_true(len(remote.execute('pgrep nova-compute')['stdout'])
== 1, 'Nova service was not restarted')
assert_true(len(remote.execute(
"grep \"nova-compute.*trying to restart\" "
"/var/log/monit.log")['stdout']) > 0,
'Nova service was not restarted')
with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
remote.execute("kill -9 `pgrep nova-compute`")
wait(
lambda: len(remote.execute('pgrep nova-compute')['stdout'])
== 1, timeout=120)
assert_true(len(remote.execute('pgrep nova-compute')['stdout'])
== 1, 'Nova service was not restarted')
assert_true(len(remote.execute(
"grep \"nova-compute.*trying to restart\" "
"/var/log/monit.log")['stdout']) > 0,
'Nova service was not restarted')
def check_firewall_vulnerability(self):
if not self.env.d_env.has_snapshot(self.snapshot_name):
@ -494,30 +506,35 @@ class TestHaFailoverBase(TestBasic):
self.env.revert_snapshot(self.snapshot_name)
cluster_id = self.fuel_web.get_last_created_cluster()
for node in self.fuel_web.client.list_cluster_nodes(cluster_id):
remote = self.env.d_env.get_ssh_to_remote(node['ip'])
assert_true(
check_ping(remote, DNS, deadline=120, interval=10),
"No Internet access from {0}".format(node['fqdn'])
)
remote_compute = self.fuel_web.get_ssh_for_node('slave-05')
with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
assert_true(
check_ping(remote, DNS, deadline=120, interval=10),
"No Internet access from {0}".format(node['fqdn'])
)
devops_node = self.fuel_web.get_nailgun_primary_node(
self.env.d_env.nodes().slaves[0])
file_name = DOWNLOAD_LINK.split('/')[-1]
file_path = '/root/tmp'
remote_compute.execute(
"screen -S download -d -m bash -c 'mkdir -p {0} &&"
" cd {0} && wget --limit-rate=100k {1}'".format(file_path,
DOWNLOAD_LINK))
try:
wait(
lambda: remote_compute.execute("ls -1 {0}/{1}".format(
file_path, file_name))['exit_code'] == 0, timeout=60)
except TimeoutError:
raise TimeoutError(
"File download was not started")
file_size1 = get_file_size(remote_compute, file_name, file_path)
time.sleep(60)
file_size2 = get_file_size(remote_compute, file_name, file_path)
with self.fuel_web.get_ssh_for_node('slave-05') as remote:
remote.execute(
"screen -S download -d -m bash -c 'mkdir -p {0} &&"
" cd {0} && wget --limit-rate=100k {1}'".format(file_path,
DOWNLOAD_LINK))
with self.fuel_web.get_ssh_for_node('slave-05') as remote:
try:
wait(
lambda: remote.execute("ls -1 {0}/{1}".format(
file_path, file_name))['exit_code'] == 0, timeout=60)
except TimeoutError:
raise TimeoutError(
"File download was not started")
with self.fuel_web.get_ssh_for_node('slave-05') as remote:
file_size1 = get_file_size(remote, file_name, file_path)
time.sleep(60)
file_size2 = get_file_size(remote, file_name, file_path)
assert_true(file_size2 > file_size1,
"File download was interrupted, size of downloading "
"does not change")
@ -529,14 +546,16 @@ class TestHaFailoverBase(TestBasic):
except TimeoutError:
raise TimeoutError(
"Primary controller was not destroyed")
assert_true(
check_ping(remote_compute, DNS, deadline=120, interval=10),
"No Internet access from {0}".format(node['fqdn'])
)
with self.fuel_web.get_ssh_for_node('slave-05') as remote:
assert_true(
check_ping(remote, DNS, deadline=120, interval=10),
"No Internet access from {0}".format(node['fqdn'])
)
if OPENSTACK_RELEASE == OPENSTACK_RELEASE_UBUNTU:
file_size1 = get_file_size(remote_compute, file_name, file_path)
time.sleep(60)
file_size2 = get_file_size(remote_compute, file_name, file_path)
with self.fuel_web.get_ssh_for_node('slave-05') as remote:
file_size1 = get_file_size(remote, file_name, file_path)
time.sleep(60)
file_size2 = get_file_size(remote, file_name, file_path)
assert_true(file_size2 > file_size1,
"File download was interrupted, size of downloading "
"does not change")
@ -566,6 +585,8 @@ class TestHaFailoverBase(TestBasic):
logger.error('command failed to be executed'.format(
self.env.d_env.nodes().slaves[:1].name))
raise
finally:
remote.clear()
cluster_id = self.fuel_web.client.get_cluster_id(
self.__class__.__name__)
@ -764,11 +785,11 @@ class TestHaFailoverBase(TestBasic):
for node in pcm_nodes]
logger.debug("rabbit nodes are {}".format(rabbit_nodes))
slave1_remote = self.fuel_web.get_ssh_for_node(
self.env.d_env.nodes().slaves[0].name)
rabbit_slave1_name = None
slave1_name = ''.join(
slave1_remote.execute('hostname')['stdout']).strip()
with self.fuel_web.get_ssh_for_node(
self.env.d_env.nodes().slaves[0].name) as remote:
slave1_name = ''.join(
remote.execute('hostname')['stdout']).strip()
logger.debug('slave1 name is {}'.format(slave1_name))
for rabbit_node in rabbit_nodes:
if rabbit_node in slave1_name:
@ -777,32 +798,36 @@ class TestHaFailoverBase(TestBasic):
pcm_nodes.remove(slave1_name)
slave1_remote.execute('crm configure property maintenance-mode=true')
slave1_remote.execute('service corosync stop')
with self.fuel_web.get_ssh_for_node(
self.env.d_env.nodes().slaves[0].name) as remote:
remote.execute('crm configure property maintenance-mode=true')
remote.execute('service corosync stop')
remote = self.env.d_env.get_admin_remote()
cmd = "grep 'Ignoring alive node rabbit@{0}' /var/log/remote" \
"/{1}/rabbit-fence.log".format(rabbit_slave1_name, pcm_nodes[0])
try:
wait(
lambda: not remote.execute(cmd)['exit_code'], timeout=2 * 60)
except TimeoutError:
result = remote.execute(cmd)
assert_equal(0, result['exit_code'],
'alive rabbit node was not ignored,'
' result is {}'.format(result))
assert_equal(0, remote.execute(
"grep 'Got {0} that left cluster' /var/log/remote/{1}/"
"rabbit-fence.log".format(slave1_name,
pcm_nodes[0]))['exit_code'],
"slave {} didn't leave cluster".format(slave1_name))
assert_equal(0, remote.execute(
"grep 'Preparing to fence node rabbit@{0} from rabbit cluster'"
" /var/log/remote/{1}/"
"rabbit-fence.log".format(rabbit_slave1_name,
pcm_nodes[0]))['exit_code'],
"node {} wasn't prepared for"
" fencing".format(rabbit_slave1_name))
with self.env.d_env.get_admin_remote() as remote:
cmd = "grep 'Ignoring alive node rabbit@{0}' /var/log/remote" \
"/{1}/rabbit-fence.log".format(rabbit_slave1_name,
pcm_nodes[0])
try:
wait(
lambda: not remote.execute(cmd)['exit_code'],
timeout=2 * 60)
except TimeoutError:
result = remote.execute(cmd)
assert_equal(0, result['exit_code'],
'alive rabbit node was not ignored,'
' result is {}'.format(result))
assert_equal(0, remote.execute(
"grep 'Got {0} that left cluster' /var/log/remote/{1}/"
"rabbit-fence.log".format(slave1_name,
pcm_nodes[0]))['exit_code'],
"slave {} didn't leave cluster".format(slave1_name))
assert_equal(0, remote.execute(
"grep 'Preparing to fence node rabbit@{0} from rabbit cluster'"
" /var/log/remote/{1}/"
"rabbit-fence.log".format(rabbit_slave1_name,
pcm_nodes[0]))['exit_code'],
"node {} wasn't prepared for"
" fencing".format(rabbit_slave1_name))
rabbit_status = self.fuel_web.get_rabbit_running_nodes(
self.env.d_env.nodes().slaves[1].name)
@ -812,8 +837,10 @@ class TestHaFailoverBase(TestBasic):
"rabbit node {} is not in"
" rabbit status".format(rabbit_node))
slave1_remote.execute("service corosync start")
slave1_remote.execute("service pacemaker restart")
with self.fuel_web.get_ssh_for_node(
self.env.d_env.nodes().slaves[0].name) as remote:
remote.execute("service corosync start")
remote.execute("service pacemaker restart")
self.fuel_web.assert_pacemaker(self.env.d_env.nodes().slaves[0].name,
self.env.d_env.nodes().slaves[:3], [])
@ -831,11 +858,10 @@ class TestHaFailoverBase(TestBasic):
for node in pcm_nodes]
logger.debug("rabbit nodes are {}".format(rabbit_nodes))
slave1_remote = self.fuel_web.get_ssh_for_node(
self.env.d_env.nodes().slaves[0].name)
slave1_name = ''.join(
slave1_remote.execute('hostname')['stdout']).strip()
with self.fuel_web.get_ssh_for_node(
self.env.d_env.nodes().slaves[0].name) as remote:
slave1_name = ''.join(
remote.execute('hostname')['stdout']).strip()
logger.debug('slave1 name is {}'.format(slave1_name))
for rabbit_node in rabbit_nodes:
if rabbit_node in slave1_name:
@ -844,40 +870,45 @@ class TestHaFailoverBase(TestBasic):
pcm_nodes.remove(slave1_name)
slave1_remote.execute('crm configure property maintenance-mode=true')
slave1_remote.execute('rabbitmqctl stop_app')
slave1_remote.execute('service corosync stop')
with self.fuel_web.get_ssh_for_node(
self.env.d_env.nodes().slaves[0].name) as remote:
remote.execute('crm configure property maintenance-mode=true')
remote.execute('rabbitmqctl stop_app')
remote.execute('service corosync stop')
remote = self.env.d_env.get_admin_remote()
with self.env.d_env.get_admin_remote() as remote:
cmd = "grep 'Forgetting cluster node rabbit@{0}' /var/log/remote" \
"/{1}/rabbit-fence.log".format(rabbit_slave1_name, pcm_nodes[0])
try:
wait(
lambda: not remote.execute(cmd)['exit_code'], timeout=2 * 60)
except TimeoutError:
result = remote.execute(cmd)
assert_equal(0, result['exit_code'],
'dead rabbit node was not removed,'
' result is {}'.format(result))
cmd = "grep 'Forgetting cluster node rabbit@{0}' /var/log/remote" \
"/{1}/rabbit-fence.log".format(rabbit_slave1_name,
pcm_nodes[0])
try:
wait(
lambda: not remote.execute(cmd)['exit_code'],
timeout=2 * 60)
except TimeoutError:
result = remote.execute(cmd)
assert_equal(0, result['exit_code'],
'dead rabbit node was not removed,'
' result is {}'.format(result))
assert_equal(0, remote.execute(
"grep 'Got {0} that left cluster' /var/log/remote/{1}/"
"rabbit-fence.log".format(slave1_name,
pcm_nodes[0]))['exit_code'],
"node {} didn't leave cluster".format(slave1_name))
assert_equal(0, remote.execute(
"grep 'Preparing to fence node rabbit@{0} from rabbit cluster'"
" /var/log/remote/{1}/"
"rabbit-fence.log".format(rabbit_slave1_name,
pcm_nodes[0]))['exit_code'],
"node {} wasn't prepared for"
" fencing".format(rabbit_slave1_name))
assert_equal(0, remote.execute(
"grep 'Disconnecting node rabbit@{0}' /var/log/remote/{1}/"
"rabbit-fence.log".format(rabbit_slave1_name,
pcm_nodes[0]))['exit_code'],
"node {} wasn't disconnected".format(rabbit_slave1_name))
assert_equal(0, remote.execute(
"grep 'Got {0} that left cluster' /var/log/remote/{1}/"
"rabbit-fence.log".format(slave1_name,
pcm_nodes[0]))['exit_code'],
"node {} didn't leave cluster".format(slave1_name))
assert_equal(0, remote.execute(
"grep 'Preparing to fence node rabbit@{0} from rabbit cluster'"
" /var/log/remote/{1}/"
"rabbit-fence.log".format(rabbit_slave1_name,
pcm_nodes[0]))['exit_code'],
"node {} wasn't prepared for"
" fencing".format(rabbit_slave1_name))
assert_equal(0, remote.execute(
"grep 'Disconnecting node rabbit@{0}' /var/log/remote/{1}/"
"rabbit-fence.log".format(rabbit_slave1_name,
pcm_nodes[0]))['exit_code'],
"node {} wasn't disconnected"
.format(rabbit_slave1_name))
rabbit_nodes.remove(rabbit_slave1_name)
rabbit_status = self.fuel_web.get_rabbit_running_nodes(

View File

@ -205,11 +205,11 @@ class DeployHAOneControllerMasterNodeFail(base_test_case.TestBasic):
self.env.revert_snapshot("deploy_ha_dns_ntp")
remote = self.env.d_env.get_admin_remote()
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
remote_slave = self.env.d_env.get_ssh_to_remote(_ip)
remote.execute("dockerctl shell cobbler killall dnsmasq")
checkers.external_dns_check(remote_slave)
with self.env.d_env.get_admin_remote() as remote:
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote_slave:
remote.execute("dockerctl shell cobbler killall dnsmasq")
checkers.external_dns_check(remote_slave)
@test(depends_on=[deploy_ha_dns_ntp],
groups=["external_ntp_ha"])
@ -227,9 +227,10 @@ class DeployHAOneControllerMasterNodeFail(base_test_case.TestBasic):
self.env.revert_snapshot("deploy_ha_dns_ntp")
cluster_id = self.fuel_web.get_last_created_cluster()
remote = self.env.d_env.get_admin_remote()
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
remote_slave = self.env.d_env.get_ssh_to_remote(_ip)
vrouter_vip = self.fuel_web.get_management_vrouter_vip(cluster_id)
remote.execute("pkill -9 ntpd")
checkers.external_ntp_check(remote_slave, vrouter_vip)
with self.env.d_env.get_admin_remote() as remote:
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote_slave:
vrouter_vip = self.fuel_web\
.get_management_vrouter_vip(cluster_id)
remote.execute("pkill -9 ntpd")
checkers.external_ntp_check(remote_slave, vrouter_vip)

View File

@ -214,6 +214,9 @@ class TestNeutronFailover(base_test_case.TestBasic):
new_remote.execute("pcs resource clear p_neutron-l3-agent {0}".
format(node_with_l3))
new_remote.clear()
remote.clear()
@test(depends_on=[deploy_ha_neutron],
groups=["neutron_l3_migration_after_reset"])
@log_snapshot_after_test
@ -284,6 +287,8 @@ class TestNeutronFailover(base_test_case.TestBasic):
self.check_instance_connectivity(remote, dhcp_namespace, instance_ip)
remote.clear()
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
@ -356,6 +361,8 @@ class TestNeutronFailover(base_test_case.TestBasic):
self.check_instance_connectivity(remote, dhcp_namespace, instance_ip)
remote.clear()
@retry(count=3, delay=120)
def run_single_test(cluster_id):
self.fuel_web.run_single_ostf_test(
@ -436,6 +443,6 @@ class TestNeutronFailover(base_test_case.TestBasic):
.format(cmd))
res = remote.execute(cmd)
break
remote.clear()
assert_equal(0, res['exit_code'],
'Most packages were dropped, result is {0}'.format(res))

View File

@ -66,8 +66,8 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
cluster_id = self.fuel_web.get_last_created_cluster()
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
expected_kernel = self.get_slave_kernel(remote)
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
expected_kernel = self.get_slave_kernel(remote)
self.env.admin_actions.upgrade_master_node()
@ -91,8 +91,8 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
test_sets=['ha', 'smoke', 'sanity'])
if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
_ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
kernel = self.get_slave_kernel(remote)
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
kernel = self.get_slave_kernel(remote)
checkers.check_kernel(kernel, expected_kernel)
create_diagnostic_snapshot(
self.env, "pass", "upgrade_ha_one_controller")
@ -134,8 +134,8 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
remote_ceph = self.fuel_web.get_ssh_for_node('slave-03')
self.fuel_web.prepare_ceph_to_delete(remote_ceph)
with self.fuel_web.get_ssh_for_node('slave-03') as remote_ceph:
self.fuel_web.prepare_ceph_to_delete(remote_ceph)
nailgun_nodes = self.fuel_web.update_nodes(
cluster_id, {'slave-03': ['compute', 'ceph-osd']}, False, True)
task = self.fuel_web.deploy_cluster(cluster_id)
@ -216,8 +216,8 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
_ip = self.fuel_web.get_nailgun_node_by_name('slave-06')['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
kernel = self.get_slave_kernel(remote)
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
kernel = self.get_slave_kernel(remote)
logger.debug("ubuntu kernel version"
" on new node is {}".format(kernel))
self.fuel_web.verify_network(cluster_id)
@ -263,30 +263,30 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
remote = self.env.d_env.get_admin_remote()
with self.env.d_env.get_admin_remote() as remote:
# Patching
update_command = 'yum update -y'
update_result = remote.execute(update_command)
logger.debug('Result of "{1}" command on master node: '
'{0}'.format(update_result, update_command))
assert_equal(int(update_result['exit_code']), 0,
'Packages update failed, '
'inspect logs for details')
# Patching
update_command = 'yum update -y'
update_result = remote.execute(update_command)
logger.debug('Result of "{1}" command on master node: '
'{0}'.format(update_result, update_command))
assert_equal(int(update_result['exit_code']), 0,
'Packages update failed, '
'inspect logs for details')
# Restart containers
destroy_command = 'dockerctl destroy all'
destroy_result = remote.execute(destroy_command)
logger.debug('Result of "{1}" command on master node: '
'{0}'.format(destroy_result, destroy_command))
assert_equal(int(destroy_result['exit_code']), 0,
'Destroy containers failed, '
'inspect logs for details')
# Restart containers
destroy_command = 'dockerctl destroy all'
destroy_result = remote.execute(destroy_command)
logger.debug('Result of "{1}" command on master node: '
'{0}'.format(destroy_result, destroy_command))
assert_equal(int(destroy_result['exit_code']), 0,
'Destroy containers failed, '
'inspect logs for details')
start_command = 'dockerctl start all'
start_result = remote.execute(start_command)
logger.debug('Result of "{1}" command on master node: '
'{0}'.format(start_result, start_command))
start_command = 'dockerctl start all'
start_result = remote.execute(start_command)
logger.debug('Result of "{1}" command on master node: '
'{0}'.format(start_result, start_command))
assert_equal(int(start_result['exit_code']), 0,
'Start containers failed, '
'inspect logs for details')
@ -395,8 +395,8 @@ class UpgradeFuelMaster(base_test_data.TestBasic):
assert_equal(str(cluster['net_provider']), 'neutron')
if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
_ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
kernel = self.get_slave_kernel(remote)
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
kernel = self.get_slave_kernel(remote)
logger.debug("ubuntu kernel version"
" on new node is {}".format(kernel))
self.fuel_web.verify_network(cluster_id=cluster_id)
@ -563,8 +563,8 @@ class RollbackFuelMaster(base_test_data.TestBasic):
cluster_id = self.fuel_web.get_last_created_cluster()
_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)
self.env.admin_actions.upgrade_master_node(rollback=True)
@ -587,8 +587,8 @@ class RollbackFuelMaster(base_test_data.TestBasic):
self.fuel_web.deploy_cluster_wait(cluster_id)
if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
_ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
remote = self.env.d_env.get_ssh_to_remote(_ip)
kernel = UpgradeFuelMaster.get_slave_kernel(remote)
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
kernel = UpgradeFuelMaster.get_slave_kernel(remote)
checkers.check_kernel(kernel, expected_kernel)
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])

View File

@ -167,8 +167,8 @@ class UpgradeFuelChains(base_test_data.TestBasic):
if not self.env.revert_snapshot('upgrade_first_stage'):
raise SkipTest()
remote = self.env.d_env.get_admin_remote()
remote.execute("rm -rf /var/*upgrade*")
with self.env.d_env.get_admin_remote() as remote:
remote.execute("rm -rf /var/*upgrade*")
self.env.admin_actions.upgrade_master_node()